aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--file-tagger.py133
-rw-r--r--gui.py48
-rw-r--r--util.py31
3 files changed, 171 insertions, 41 deletions
diff --git a/file-tagger.py b/file-tagger.py
index 9708dfa..4d25e90 100644
--- a/file-tagger.py
+++ b/file-tagger.py
@@ -9,6 +9,7 @@ from tmsu import *
from util import *
from predictor import *
from PIL import Image
+import datetime
'''
Walk over all files for the given base directory and all subdirectories recursively.
@@ -29,7 +30,7 @@ def walk(tmsu, args):
logger.error("Invalid start index. index = {}, number of files = {}".format(args["index"], len(files)))
return
- if args["predict_images"]:
+ if args["predict_images"] or args["predict_videos"]:
backend = {
"torch": Predictor.BackendTorch,
"tensorflow": Predictor.BackendTensorflow,
@@ -47,6 +48,20 @@ def walk(tmsu, args):
not_empty = bool(tags)
logger.info("Existing tags: {}".format(tags))
+ if ".tmsu" in file_path:
+ logger.info("Database meta file, skipping.")
+ continue
+
+ logger.info("Renaming file {}".format(file_path))
+ file_path = files[i] = {
+ "none": lambda x: x,
+ "sha1": rename_sha1,
+ "sha256": rename_sha256,
+ "cdate": rename_cdate,
+ "mdate": rename_mdate
+ }.get(args["rename"])(file_path)
+ logger.info("New file name: {}".format(file_path))
+
if (not_empty and args["skip_tagged"]):
logger.info("Already tagged, skipping.")
continue
@@ -54,34 +69,95 @@ def walk(tmsu, args):
if args["open_system"]:
open_system(file_path)
+ if args["tag_metadata"]:
+ # Base name and extension
+ base = os.path.splitext(os.path.basename(file_path))
+ if base[1]:
+ tags.update({base[0], base[1]})
+ else:
+ tags.update({base[0]})
+ # File creation and modification time
+ time_c = datetime.datetime.fromtimestamp(os.path.getctime(file_path))
+ time_m = datetime.datetime.fromtimestamp(os.path.getmtime(file_path))
+ tags.update({time_c.strftime("%Y-%m-%d"),
+ time_c.strftime("%Y"),
+ time_c.strftime("%B"),
+ time_c.strftime("%A"),
+ time_c.strftime("%Hh")})
+ if time_c != time_m:
+ tags.update({time_m.strftime("%Y-%m-%d"),
+ time_m.strftime("%Y"),
+ time_m.strftime("%B"),
+ time_m.strftime("%A"),
+ time_m.strftime("%Hh")})
+
# Detect MIME-type for file
- mime_type = mime.from_file(file_path)
+ mime_type = mime.from_file(file_path).split("/")
+
+ tags.update(mime_type)
# Handle images
- if mime_type.split("/")[0] == "image":
+ if mime_type[0] == "image":
logger.debug("File is image")
- img = cv2.imread(file_path)
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
- if args["predict_images"]:
- logger.info("Predicting image tags ...")
- tags_predict = predictor.predict(img)
- logger.info("Predicted tags: {}".format(tags_predict))
- tags.update(tags_predict)
- if args["gui_tag"]:
- while(True): # For GUI inputs (rotate, ...)
- logger.debug("Showing image GUI ...")
- img_show = image_resize(img, width=args["gui_image_length"]) if img.shape[1] > img.shape[0] else image_resize(img, height=args["gui_image_length"])
- #img_show = cv2.cvtColor(img_show, cv2.COLOR_BGR2RGB)
- ret = GuiImage(i, file_path, img_show, tags).loop()
- tags = set(ret[1]).difference({''})
- if ret[0] == GuiImage.RETURN_ROTATE_90_CLOCKWISE:
- img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
- elif ret[0] == GuiImage.RETURN_ROTATE_90_COUNTERCLOCKWISE:
- img = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)
- elif ret[0] == GuiImage.RETURN_NEXT:
+ if args["predict_images"] or args["gui_tag"]:
+ img = cv2.imread(file_path)
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+ if args["predict_images"]:
+ logger.info("Predicting image tags ...")
+ tags_predict = predictor.predict(img)
+ logger.info("Predicted tags: {}".format(tags_predict))
+ tags.update(tags_predict)
+ if args["gui_tag"]:
+ while(True): # For GUI inputs (rotate, ...)
+ logger.debug("Showing image GUI ...")
+ img_show = image_resize(img, width=args["gui_image_length"]) if img.shape[1] > img.shape[0] else image_resize(img, height=args["gui_image_length"])
+ #img_show = cv2.cvtColor(img_show, cv2.COLOR_BGR2RGB)
+ ret = GuiImage(i, file_path, img_show, tags).loop()
+ tags = set(ret[1]).difference({''})
+ if ret[0] == GuiImage.RETURN_ROTATE_90_CLOCKWISE:
+ img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
+ elif ret[0] == GuiImage.RETURN_ROTATE_90_COUNTERCLOCKWISE:
+ img = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)
+ elif ret[0] == GuiImage.RETURN_NEXT:
+ break
+ elif ret[0] == GuiImage.RETURN_ABORT:
+ return
+ elif mime_type[0] == "video":
+ logger.debug("File is video")
+ if args["predict_videos"] or args["gui_tag"]:
+ cap = cv2.VideoCapture(file_path)
+ n_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
+ step = n_frames / args["predict_videos_key_frames"]
+ print(step)
+ preview = None
+ for frame in np.arange(0, n_frames, step):
+ cap.set(cv2.CAP_PROP_POS_FRAMES, max(-1, round(frame - 1)))
+ _, f = cap.read()
+ f = cv2.cvtColor(f, cv2.COLOR_BGR2RGB)
+ if frame == 0:
+ preview = f
+ if args["predict_videos"]:
+                        logger.info("Predicting video frame {} of {}".format(frame, n_frames))
+ tags_predict = predictor.predict(f)
+ logger.info("Predicted tags: {}".format(tags_predict))
+ tags.update(tags_predict)
+ else:
break
- elif ret[0] == GuiImage.RETURN_ABORT:
- return
+ if args["gui_tag"]:
+ while(True): # For GUI inputs (rotate, ...)
+ logger.debug("Showing image GUI ...")
+ img_show = image_resize(preview, width=args["gui_image_length"]) if preview.shape[1] > preview.shape[0] else image_resize(preview, height=args["gui_image_length"])
+ #img_show = cv2.cvtColor(img_show, cv2.COLOR_BGR2RGB)
+ ret = GuiImage(i, file_path, img_show, tags).loop()
+ tags = set(ret[1]).difference({''})
+ if ret[0] == GuiImage.RETURN_ROTATE_90_CLOCKWISE:
+ preview = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
+ elif ret[0] == GuiImage.RETURN_ROTATE_90_COUNTERCLOCKWISE:
+ preview = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)
+ elif ret[0] == GuiImage.RETURN_NEXT:
+ break
+ elif ret[0] == GuiImage.RETURN_ABORT:
+ return
else:
if args["gui_tag"]:
while(True):
@@ -96,6 +172,7 @@ def walk(tmsu, args):
if ((not args["gui_tag"]) and (not args["skip_prompt"])):
tags = set(input_with_prefill("\nTags for file {}:\n".format(file_path), ','.join(tags)).split(","))
+ tags = { tag.lower().replace(" ", "_") for tag in tags }
logger.info("Tagging {}".format(tags))
tmsu.tag(file_path, tags, untag=not_empty)
@@ -106,11 +183,15 @@ if __name__ == "__main__":
parser.add_argument('-f', '--file-dir', nargs='?', default='.', type=dir_path, help='File directory for walking (default: %(default)s)')
parser.add_argument('-g', '--gui', nargs='?', const=1, default=False, type=bool, help='Show main GUI (default: %(default)s)')
parser.add_argument('--tmsu-command', nargs='?', const=1, default="tmsu", type=str, help='TMSU command override (default: %(default)s)')
+ parser.add_argument('-r', '--rename', nargs='?', const=1, choices=["none", "sha1", "sha256", "cdate", "mdate"], default="none", type=str.lower, help='Rename files based on given scheme (default: %(default)s)')
+ parser.add_argument('--tag-metadata', nargs='?', const=1, default=True, type=bool, help='Use metadata as default tags (default: %(default)s)')
parser.add_argument('--predict-images', nargs='?', const=1, default=False, type=bool, help='Use prediction for image tagging (default: %(default)s)')
parser.add_argument('--predict-images-backend', nargs='?', const=1, choices=["torch", "tensorflow", "keras"], default="torch", type=str.lower, help='Determines which backend should be used for keyword prediction (default: %(default)s)')
parser.add_argument('--predict-images-top', nargs='?', const=1, default=10, type=int, help='Defines how many top prediction keywords should be used (default: %(default)s)')
parser.add_argument('--predict-images-detail-factor', nargs='?', const=1, default=2, type=int, help='Width factor for detail scan, multiplied by 224 for ResNet50 (default: %(default)s)')
parser.add_argument('--predict-images-skip-detail', nargs='?', const=1, default=False, type=bool, help='Skip detail scan in image prediction (default: %(default)s)')
+ parser.add_argument('--predict-videos', nargs='?', const=1, default=False, type=bool, help='Use prediction for video tagging (default: %(default)s)')
+ parser.add_argument('--predict-videos-key-frames', nargs='?', const=1, default=5, type=int, help='Defines how many key frames are used to predict videos (default: %(default)s)')
parser.add_argument('--gui-tag', nargs='?', const=1, default=False, type=bool, help='Show GUI for tagging (default: %(default)s)')
parser.add_argument('--gui-image-length', nargs='?', const=1, default=800, type=int, help='Length of longest side for preview (default: %(default)s)')
parser.add_argument('--open-system', nargs='?', const=1, default=False, type=bool, help='Open all files with system default (default: %(default)s)')
@@ -135,11 +216,15 @@ if __name__ == "__main__":
"file_dir": args.file_dir,
"gui": args.gui,
"tmsu_command": args.tmsu_command,
+ "rename": args.rename,
+ "tag_metadata": args.tag_metadata,
"predict_images": args.predict_images,
"predict_images_backend": args.predict_images_backend,
"predict_images_top": args.predict_images_top,
"predict_images_detail_factor": args.predict_images_detail_factor,
"predict_images_skip_detail": args.predict_images_skip_detail,
+ "predict_videos": args.predict_videos,
+ "predict_videos_key_frames": args.predict_videos_key_frames,
"gui_tag": args.gui_tag,
"gui_image_length": args.gui_image_length,
"open_system": args.open_system,
diff --git a/gui.py b/gui.py
index 7bd30ad..19d265c 100644
--- a/gui.py
+++ b/gui.py
@@ -13,11 +13,15 @@ class GuiMain(object):
self.__args = args
self.__base = StringVar(self.__master, value=args["base"])
self.__file_dir = StringVar(self.__master, value=args["file_dir"])
+ self.__rename = StringVar(self.__master, value=str(args["rename"]))
+ self.__tag_metadata = BooleanVar(self.__master, value=args["tag_metadata"])
self.__predict_images = BooleanVar(self.__master, value=args["predict_images"])
self.__predict_images_backend = StringVar(self.__master, value=str(args["predict_images_backend"]))
self.__predict_images_top = StringVar(self.__master, value=str(args["predict_images_top"]))
self.__predict_images_skip_detail = BooleanVar(self.__master, value=args["predict_images_skip_detail"])
self.__predict_images_detail_factor = StringVar(self.__master, value=str(args["predict_images_detail_factor"]))
+ self.__predict_videos = BooleanVar(self.__master, value=args["predict_videos"])
+ self.__predict_videos_key_frames = StringVar(self.__master, value=str(args["predict_videos_key_frames"]))
self.__gui_tag = BooleanVar(self.__master, value=args["gui_tag"])
self.__gui_image_length = StringVar(self.__master, value=str(args["gui_image_length"]))
self.__open_system = BooleanVar(self.__master, value=args["open_system"])
@@ -33,34 +37,44 @@ class GuiMain(object):
Label(self.__master, text="File directory for walking:").grid(row=1, column=0)
Entry(self.__master, textvariable=self.__file_dir).grid(row=1, column=1, columnspan=2)
Button(self.__master, text="Browse", command=lambda: self.__browse(self.__file_dir)).grid(row=1, column=3)
- Checkbutton(self.__master, text="Use prediction for image tagging", variable=self.__predict_images).grid(row=2, column=0, columnspan=4, sticky=W)
- Label(self.__master, text="Image prediction backend:").grid(row=3, column=0)
- ttk.Combobox(self.__master, textvariable=self.__predict_images_backend, values=("torch", "tensorflow", "keras"), state="readonly").grid(row=3, column=1)
- Label(self.__master, text="Number of top results:").grid(row=4, column=0)
- Entry(self.__master, textvariable=self.__predict_images_top, validate='all', validatecommand=(validate_number, '%P')).grid(row=4, column=1, columnspan=1)
- Checkbutton(self.__master, text="Skip detail scan in image prediction", variable=self.__predict_images_skip_detail).grid(row=5, column=0, columnspan=4, sticky=W)
- Label(self.__master, text="Width factor for detail scan:").grid(row=6, column=0)
- Entry(self.__master, textvariable=self.__predict_images_detail_factor, validate='all', validatecommand=(validate_number, '%P')).grid(row=6, column=1, columnspan=1)
- Checkbutton(self.__master, text="Show GUI for tagging", variable=self.__gui_tag).grid(row=7, column=0, columnspan=4, sticky=W)
- Label(self.__master, text="Image GUI preview size:").grid(row=8, column=0)
- Entry(self.__master, textvariable=self.__gui_image_length, validate='all', validatecommand=(validate_number, '%P')).grid(row=8, column=1, columnspan=1)
- Checkbutton(self.__master, text="Open all files with system default", variable=self.__open_system).grid(row=9, column=0, columnspan=4, sticky=W)
- Checkbutton(self.__master, text="Skip prompt for file tags", variable=self.__skip_prompt).grid(row=10, column=0, columnspan=4, sticky=W)
- Checkbutton(self.__master, text="Skip already tagged files", variable=self.__skip_tagged).grid(row=11, column=0, columnspan=4, sticky=W)
- Label(self.__master, text="Start at index:").grid(row=12, column=0)
- Entry(self.__master, textvariable=self.__index, validate='all', validatecommand=(validate_number, '%P')).grid(row=12, column=1, columnspan=1)
- Button(self.__master, text="Start", command=self.__master.destroy).grid(row=13, column=0, columnspan=4)
+ Label(self.__master, text="File rename scheme:").grid(row=2, column=0)
+ ttk.Combobox(self.__master, textvariable=self.__rename, values=("none", "sha1", "sha256", "cdate", "mdate"), state="readonly").grid(row=2, column=1)
+ Checkbutton(self.__master, text="Use metadata as default tags", variable=self.__tag_metadata).grid(row=3, column=0, columnspan=4, sticky=W)
+ Checkbutton(self.__master, text="Use prediction for image tagging", variable=self.__predict_images).grid(row=4, column=0, columnspan=4, sticky=W)
+ Label(self.__master, text="Image prediction backend:").grid(row=5, column=0)
+ ttk.Combobox(self.__master, textvariable=self.__predict_images_backend, values=("torch", "tensorflow", "keras"), state="readonly").grid(row=5, column=1)
+ Label(self.__master, text="Number of top results:").grid(row=6, column=0)
+ Entry(self.__master, textvariable=self.__predict_images_top, validate='all', validatecommand=(validate_number, '%P')).grid(row=6, column=1, columnspan=1)
+ Checkbutton(self.__master, text="Skip detail scan in image prediction", variable=self.__predict_images_skip_detail).grid(row=7, column=0, columnspan=4, sticky=W)
+ Label(self.__master, text="Width factor for detail scan:").grid(row=8, column=0)
+ Entry(self.__master, textvariable=self.__predict_images_detail_factor, validate='all', validatecommand=(validate_number, '%P')).grid(row=8, column=1, columnspan=1)
+ Checkbutton(self.__master, text="Use prediction for video tagging", variable=self.__predict_videos).grid(row=9, column=0, columnspan=4, sticky=W)
+ Label(self.__master, text="Number of key frames:").grid(row=10, column=0)
+ Entry(self.__master, textvariable=self.__predict_videos_key_frames, validate='all', validatecommand=(validate_number, '%P')).grid(row=10, column=1, columnspan=1)
+ Checkbutton(self.__master, text="Show GUI for tagging", variable=self.__gui_tag).grid(row=11, column=0, columnspan=4, sticky=W)
+ Label(self.__master, text="Image GUI preview size:").grid(row=12, column=0)
+ Entry(self.__master, textvariable=self.__gui_image_length, validate='all', validatecommand=(validate_number, '%P')).grid(row=12, column=1, columnspan=1)
+ Checkbutton(self.__master, text="Open all files with system default", variable=self.__open_system).grid(row=13, column=0, columnspan=4, sticky=W)
+ Checkbutton(self.__master, text="Skip prompt for file tags", variable=self.__skip_prompt).grid(row=14, column=0, columnspan=4, sticky=W)
+ Checkbutton(self.__master, text="Skip already tagged files", variable=self.__skip_tagged).grid(row=15, column=0, columnspan=4, sticky=W)
+ Label(self.__master, text="Start at index:").grid(row=16, column=0)
+ Entry(self.__master, textvariable=self.__index, validate='all', validatecommand=(validate_number, '%P')).grid(row=16, column=1, columnspan=1)
+ Button(self.__master, text="Start", command=self.__master.destroy).grid(row=17, column=0, columnspan=4)
def loop(self):
self.__master.mainloop()
self.__args["base"] = self.__base.get()
self.__args["file_dir"] = self.__file_dir.get()
+ self.__args["tag_metadata"] = self.__tag_metadata.get()
+ self.__args["rename"] = self.__rename.get()
self.__args["predict_images"] = self.__predict_images.get()
self.__args["predict_images_backend"] = self.__predict_images_backend.get()
self.__args["predict_images_top"] = int(self.__predict_images_top.get())
self.__args["predict_images_skip_detail"] = self.__predict_images_skip_detail.get()
self.__args["predict_images_detail_factor"] = int(self.__predict_images_detail_factor.get())
+ self.__args["predict_videos"] = self.__predict_videos.get()
+ self.__args["predict_videos_key_frames"] = int(self.__predict_videos_key_frames.get())
self.__args["gui_tag"] = self.__gui_tag.get()
self.__args["gui_image_length"] = int(self.__gui_image_length.get())
self.__args["open_system"] = self.__open_system.get()
diff --git a/util.py b/util.py
index 9fca80c..243d78e 100644
--- a/util.py
+++ b/util.py
@@ -7,6 +7,37 @@ import os
import numpy as np
from queue import Queue
from threading import Thread, Lock
+import hashlib
+import datetime
+
+BUF_SIZE = 65535
+
+def rename(old, new_base):
+ ext = os.path.splitext(os.path.basename(old))[1]
+ new_fpath = os.path.join(os.path.dirname(old), new_base + ext)
+ os.rename(old, new_fpath)
+ return new_fpath
+
+def rename_hash(fpath, hash):
+ with open(fpath, 'rb') as f:
+ while True:
+ data = f.read(BUF_SIZE)
+ if not data:
+ break
+ hash.update(data)
+ return rename(fpath, hash.hexdigest())
+
+def rename_sha1(fpath):
+ return rename_hash(fpath, hashlib.sha1())
+
+def rename_sha256(fpath):
+ return rename_hash(fpath, hashlib.sha256())
+
+def rename_cdate(fpath):
+ return rename(fpath, datetime.datetime.fromtimestamp(os.path.getctime(fpath)).strftime("%Y-%m-%d-%H-%M-%S-%f"))
+
+def rename_mdate(fpath):
+ return rename(fpath, datetime.datetime.fromtimestamp(os.path.getmtime(fpath)).strftime("%Y-%m-%d-%H-%M-%S-%f"))
def image_resize(image, width = None, height = None, inter = cv2.INTER_AREA):
# initialize the dimensions of the image to be resized and