dupecheck.py

Thu, 04 Oct 2018 00:43:26 +0200

author
mdd
date
Thu, 04 Oct 2018 00:43:26 +0200
changeset 34
344802cf307d
parent 33
83bcb5931ee3
child 35
14c966c10648
permissions
-rwxr-xr-x

fix: cropdetect

#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Toolkit / executable to scan for duplicate filenames in movie database

2017 by mdd
"""

#pylint: disable=line-too-long
#pylint: disable=invalid-name

from __future__ import print_function
import os, sys

def similarity(a, b):
    """Return a similarity ratio in [0, 1] for the two strings.

    Dispatches on the module-global DIFFLIB flag (set in __main__):
    stdlib difflib.SequenceMatcher when True, otherwise the faster
    Levenshtein.ratio from python-Levenshtein.
    """
    if not DIFFLIB:
        return Levenshtein.ratio(a, b)
    return difflib.SequenceMatcher(a=a, b=b).ratio()

# Unit suffixes used by humansize(), bytes through petabytes.
suffixes = ['b', 'K', 'M', 'G', 'T', 'P']

def humansize(nbytes):
    """Format a byte count as a short human-readable string.

    Divides by 1024 until the value drops below 1024 (or suffixes run
    out), then trims trailing zeros: 1536 -> '1.5 K', 1024 -> '1 K'.
    """
    value = nbytes
    unit = 0
    while value >= 1024 and unit < len(suffixes) - 1:
        value = value / 1024.
        unit += 1
    text = ('%.2f' % value).rstrip('0').rstrip('.')
    return '%s %s' % (text, suffixes[unit])

def replace_all(text, dic):
    """Return text with every key of dic replaced by its value.

    Replacements are applied sequentially in dict iteration order, so
    an earlier substitution can affect a later one.
    """
    # .items() works on both Python 2 and 3; the original .iteritems()
    # is Python 2 only and raises AttributeError under Python 3.
    for old, new in dic.items():
        text = text.replace(old, new)
    return text

class dupechecker(object):
    """
    Simple class to scan multiple directories recursively,
    build a list of movie filenames,
    analyze the list for duplicates and dump them.
    """
    def __init__(self):
        self.basedir = ""      # last base directory given to scandir()
        self.filelist = []     # entries: [title, filename, root, ext]
        self.duplicates = {}   # original path -> list of duplicate paths
        self.ratio = 0.85      # similarity threshold used by analyze()

    def reset(self):
        """Forget all scanned files and previously found duplicates."""
        self.filelist = []
        self.duplicates = {}

    def scandir(self, basedir, extra=None):
        """
        Scan a base directory recursively for movie files and add them
        to the list for analysis.

        basedir -- directory tree to walk
        extra   -- optional list of additional file extensions
                   (e.g. ['.txt']) accepted besides the movie types
        """
        # 'extra' used to be a mutable default argument ([]); a None
        # sentinel avoids state shared between calls.
        if extra is None:
            extra = []
        self.basedir = basedir
        print("Scanning directory: %s" % basedir)
        for root, _subdirs, files in os.walk(basedir):
            for filename in files:
                ext = os.path.splitext(filename)[1].lower()
                if ext == ".ts":
                    # TV recordings look like "channel - date - title.ts";
                    # drop the first two " - " fields when present.
                    parts = filename.split(" - ")
                    if len(parts) == 1:
                        title = parts[0]
                    else:
                        title = " - ".join(parts[2:])
                    title = title[:-3].lower()  # strip the ".ts" suffix
                    self.filelist.append([title, filename, root, ext])
                elif ext in ['.mkv', '.avi', '.mpg', '.mpeg', '.mp4'] \
                        or ext in extra:
                    # splitext (instead of filename[:-4]) also handles the
                    # 5-char '.mpeg' extension, where the old slicing left
                    # a stray '.' at the end of the title.
                    title = os.path.splitext(filename)[0].lower()
                    self.filelist.append([title, filename, root, ext])

    def fixnames(self):
        """
        Search for defect filenames and rename the files on disk,
        replacing illegal characters with '-'.

        Only '.mkv' and '.txt' entries from the scan list are touched.
        """
        import re
        for item in self.filelist:
            if item[3] not in ['.mkv', '.txt']:
                continue
            # Transliterate 'ß' first, then map every character outside
            # the allowed set to '-'.
            cleanfn = replace_all(item[1], {
                    'ß': 'ss',
                })
            cleanfn = re.sub(r'[^A-Za-z0-9\.\_\-\(\)\&öäüÖÄÜ\' ]', '-', cleanfn)
            if item[1] == cleanfn:
                continue
            print(item[1])
            os.rename(
                os.path.join(item[2], item[1]),
                os.path.join(item[2], cleanfn)
                )

    def statistics(self):
        """
        Summarize disk usage and print per-extension counts and sizes,
        followed by a grand total.
        """
        stats = {}  # ext -> [file count, total size in bytes]
        for item in self.filelist:
            if item[3] not in stats:
                stats[item[3]] = [0, 0.0]
            stats[item[3]][0] += 1
            stats[item[3]][1] += os.stat(
                os.path.join(
                    item[2], item[1])).st_size
        print("%5s %6s %10s" % (
            "File:",
            "Count:",
            "Size:"))
        sum_count = 0
        sum_size = 0.0
        for ext in stats:
            sum_count += stats[ext][0]
            sum_size += stats[ext][1]
            print("%5s %6i %10s" % (
                ext, stats[ext][0],
                humansize(stats[ext][1])))
        print("%5s %6i %10s" % (
            "TOTAL", sum_count,
            humansize(sum_size)))

    def analyze(self):
        """
        Analyze the scan list for duplicates: any pair of titles whose
        similarity() exceeds self.ratio is recorded in self.duplicates,
        keyed by the full path of the first file found.
        """
        print("%i files to analyze, running duplicate testing loop..." % (
            len(self.filelist)))

        listlen = len(self.filelist)
        for idx in range(listlen):
            if not self.filelist[idx]:
                continue
            # Progress line; \033[K clears to end of line on ANSI terminals.
            print("\r%d %s\033[K" % (
                idx, self.filelist[idx][0]), end='')
            sys.stdout.flush()
            for idx2 in range(idx + 1, listlen):
                if not self.filelist[idx2]:
                    continue
                if similarity(self.filelist[idx][0], self.filelist[idx2][0]) > self.ratio:
                    key = os.path.join(self.filelist[idx][2], self.filelist[idx][1])
                    if key not in self.duplicates:
                        self.duplicates[key] = []
                    self.duplicates[key].append(
                        os.path.join(
                            self.filelist[idx2][2],
                            self.filelist[idx2][1]
                        ))
                    # Unset the found duplicate so it is not scanned again.
                    self.filelist[idx2] = None
        print("\n\n")

    def output(self):
        """
        Dump found duplicates to console
        """
        idx = 1
        for base in self.duplicates:
            print("Duplicate file set #%i" % idx)
            print(base)
            for dup in self.duplicates[base]:
                print(dup)
            print()
            idx += 1


if __name__ == "__main__":
    # Command-line front-end: parse options, then either fix file
    # names, print statistics, or run the duplicate analysis.
    import argparse

    parser = argparse.ArgumentParser(
        description='Movie database filename duplicate checker')
    parser.add_argument('--ratio', type=float, default=0.85,
                        help='filename duplicate threshold 0.1 < ratio 1.0 (default 0.85)')
    parser.add_argument('--difflib', action='store_true', default=False,
                        help='force the use of difflib instead Levenshtein')
    parser.add_argument('--stats', action='store_true', default=False,
                        help='generate stats summary instead of check for duplicates')
    parser.add_argument('--fixnames', action='store_true', default=False,
                        help='scan for mkv and txt, fix broken filenames for windows')
    parser.add_argument('basedir', metavar='basedir', nargs='+',
                        help='one or more base directories')

    args = parser.parse_args()
    dupe = dupechecker()
    dupe.ratio = args.ratio
    # Prefer python-Levenshtein (fast C implementation); fall back to
    # stdlib difflib when it is missing or --difflib was given.
    if args.difflib:
        DIFFLIB = True
        import difflib
    else:
        try:
            import Levenshtein
            DIFFLIB = False
        except ImportError:
            import difflib
            DIFFLIB = True
            print("Consider 'pip install python-Levenshtein' for faster analyze")

    if args.fixnames:
        # --fixnames does its own scan (including .txt) and always
        # terminates here.
        for srcstr in args.basedir:
            dupe.scandir(srcstr, ['.txt'])
        if len(dupe.filelist) > 0:
            print("Checking %i file names..." % len(dupe.filelist))
            dupe.fixnames()
            dupe.filelist = []
        sys.exit(0)

    for srcstr in args.basedir:
        dupe.scandir(srcstr)

    # NOTE: the former "or args.fixnames" condition here was dead code;
    # the --fixnames branch above always exits via sys.exit(0).
    if args.stats:
        dupe.statistics()
    else:
        dupe.analyze()
        dupe.output()

mercurial