Diffstat (limited to 'file-tagger.py')
-rw-r--r--   file-tagger.py   133
1 file changed, 109 insertions(+), 24 deletions(-)
diff --git a/file-tagger.py b/file-tagger.py
index 9708dfa..4d25e90 100644
--- a/file-tagger.py
+++ b/file-tagger.py
@@ -9,6 +9,7 @@ from tmsu import *
from util import *
from predictor import *
from PIL import Image
+import datetime
'''
Walk over all files for the given base directory and all subdirectories recursively.
@@ -29,7 +30,7 @@ def walk(tmsu, args):
logger.error("Invalid start index. index = {}, number of files = {}".format(args["index"], len(files)))
return
- if args["predict_images"]:
+ if args["predict_images"] or args["predict_videos"]:
backend = {
"torch": Predictor.BackendTorch,
"tensorflow": Predictor.BackendTensorflow,
@@ -47,6 +48,20 @@ def walk(tmsu, args):
not_empty = bool(tags)
logger.info("Existing tags: {}".format(tags))
+ if ".tmsu" in file_path:
+ logger.info("Database meta file, skipping.")
+ continue
+
+ logger.info("Renaming file {}".format(file_path))
+ file_path = files[i] = {
+ "none": lambda x: x,
+ "sha1": rename_sha1,
+ "sha256": rename_sha256,
+ "cdate": rename_cdate,
+ "mdate": rename_mdate
+ }.get(args["rename"])(file_path)
+ logger.info("New file name: {}".format(file_path))
+
if (not_empty and args["skip_tagged"]):
logger.info("Already tagged, skipping.")
continue
@@ -54,34 +69,95 @@ def walk(tmsu, args):
if args["open_system"]:
open_system(file_path)
+ if args["tag_metadata"]:
+ # Base name and extension
+ base = os.path.splitext(os.path.basename(file_path))
+ if base[1]:
+ tags.update({base[0], base[1]})
+ else:
+ tags.update({base[0]})
+ # File creation and modification time
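+ # Tag coarse time facets (ISO date, year, month name, weekday, hour) for ctime, and for mtime when the two timestamps differ.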
+ time_c = datetime.datetime.fromtimestamp(os.path.getctime(file_path))
+ time_m = datetime.datetime.fromtimestamp(os.path.getmtime(file_path))
+ tags.update({time_c.strftime("%Y-%m-%d"),
+ time_c.strftime("%Y"),
+ time_c.strftime("%B"),
+ time_c.strftime("%A"),
+ time_c.strftime("%Hh")})
+ if time_c != time_m:
+ tags.update({time_m.strftime("%Y-%m-%d"),
+ time_m.strftime("%Y"),
+ time_m.strftime("%B"),
+ time_m.strftime("%A"),
+ time_m.strftime("%Hh")})
+
# Detect MIME-type for file
- mime_type = mime.from_file(file_path)
+ mime_type = mime.from_file(file_path).split("/")
+
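+ # Tag both MIME parts, e.g. "image" and "jpeg".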
+ tags.update(mime_type)
# Handle images
- if mime_type.split("/")[0] == "image":
+ if mime_type[0] == "image":
logger.debug("File is image")
- img = cv2.imread(file_path)
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
- if args["predict_images"]:
- logger.info("Predicting image tags ...")
- tags_predict = predictor.predict(img)
- logger.info("Predicted tags: {}".format(tags_predict))
- tags.update(tags_predict)
- if args["gui_tag"]:
- while(True): # For GUI inputs (rotate, ...)
- logger.debug("Showing image GUI ...")
- img_show = image_resize(img, width=args["gui_image_length"]) if img.shape[1] > img.shape[0] else image_resize(img, height=args["gui_image_length"])
- #img_show = cv2.cvtColor(img_show, cv2.COLOR_BGR2RGB)
- ret = GuiImage(i, file_path, img_show, tags).loop()
- tags = set(ret[1]).difference({''})
- if ret[0] == GuiImage.RETURN_ROTATE_90_CLOCKWISE:
- img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
- elif ret[0] == GuiImage.RETURN_ROTATE_90_COUNTERCLOCKWISE:
- img = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)
- elif ret[0] == GuiImage.RETURN_NEXT:
+ if args["predict_images"] or args["gui_tag"]:
+ img = cv2.imread(file_path)
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+ if args["predict_images"]:
+ logger.info("Predicting image tags ...")
+ tags_predict = predictor.predict(img)
+ logger.info("Predicted tags: {}".format(tags_predict))
+ tags.update(tags_predict)
+ if args["gui_tag"]:
+ while(True): # For GUI inputs (rotate, ...)
+ logger.debug("Showing image GUI ...")
+ img_show = image_resize(img, width=args["gui_image_length"]) if img.shape[1] > img.shape[0] else image_resize(img, height=args["gui_image_length"])
+ #img_show = cv2.cvtColor(img_show, cv2.COLOR_BGR2RGB)
+ ret = GuiImage(i, file_path, img_show, tags).loop()
+ tags = set(ret[1]).difference({''})
+ if ret[0] == GuiImage.RETURN_ROTATE_90_CLOCKWISE:
+ img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
+ elif ret[0] == GuiImage.RETURN_ROTATE_90_COUNTERCLOCKWISE:
+ img = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)
+ elif ret[0] == GuiImage.RETURN_NEXT:
+ break
+ elif ret[0] == GuiImage.RETURN_ABORT:
+ return
+ elif mime_type[0] == "video":
+ logger.debug("File is video")
+ if args["predict_videos"] or args["gui_tag"]:
+ cap = cv2.VideoCapture(file_path)
+ n_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
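+ # Sample evenly spaced key frames across the video; step is the (possibly fractional) frame distance between samples.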
+ step = n_frames / args["predict_videos_key_frames"]
+ logger.debug("Key frame step: {}".format(step))
+ preview = None
+ for frame in np.arange(0, n_frames, step):
+ cap.set(cv2.CAP_PROP_POS_FRAMES, max(0, round(frame - 1)))
+ ok, f = cap.read()
+ if not ok:
+ break
+ f = cv2.cvtColor(f, cv2.COLOR_BGR2RGB)
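+ # Keep the first sampled frame as the preview image for the tagging GUI.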
+ if frame == 0:
+ preview = f
+ if args["predict_videos"]:
+ logger.info("Predictig video frame {} of {}".format(frame, n_frames))
+ tags_predict = predictor.predict(f)
+ logger.info("Predicted tags: {}".format(tags_predict))
+ tags.update(tags_predict)
+ else:
break
- elif ret[0] == GuiImage.RETURN_ABORT:
- return
+ if args["gui_tag"]:
+ while(True): # For GUI inputs (rotate, ...)
+ logger.debug("Showing video preview GUI ...")
+ img_show = image_resize(preview, width=args["gui_image_length"]) if preview.shape[1] > preview.shape[0] else image_resize(preview, height=args["gui_image_length"])
+ #img_show = cv2.cvtColor(img_show, cv2.COLOR_BGR2RGB)
+ ret = GuiImage(i, file_path, img_show, tags).loop()
+ tags = set(ret[1]).difference({''})
+ if ret[0] == GuiImage.RETURN_ROTATE_90_CLOCKWISE:
+ preview = cv2.rotate(preview, cv2.ROTATE_90_CLOCKWISE)
+ elif ret[0] == GuiImage.RETURN_ROTATE_90_COUNTERCLOCKWISE:
+ preview = cv2.rotate(preview, cv2.ROTATE_90_COUNTERCLOCKWISE)
+ elif ret[0] == GuiImage.RETURN_NEXT:
+ break
+ elif ret[0] == GuiImage.RETURN_ABORT:
+ return
else:
if args["gui_tag"]:
while(True):
@@ -96,6 +172,7 @@ def walk(tmsu, args):
if ((not args["gui_tag"]) and (not args["skip_prompt"])):
tags = set(input_with_prefill("\nTags for file {}:\n".format(file_path), ','.join(tags)).split(","))
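+ # Normalise tags for TMSU: lower-case, with spaces replaced by underscores.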
+ tags = { tag.lower().replace(" ", "_") for tag in tags }
logger.info("Tagging {}".format(tags))
tmsu.tag(file_path, tags, untag=not_empty)
@@ -106,11 +183,15 @@ if __name__ == "__main__":
parser.add_argument('-f', '--file-dir', nargs='?', default='.', type=dir_path, help='File directory for walking (default: %(default)s)')
parser.add_argument('-g', '--gui', nargs='?', const=1, default=False, type=bool, help='Show main GUI (default: %(default)s)')
parser.add_argument('--tmsu-command', nargs='?', const=1, default="tmsu", type=str, help='TMSU command override (default: %(default)s)')
+ parser.add_argument('-r', '--rename', nargs='?', const=1, choices=["none", "sha1", "sha256", "cdate", "mdate"], default="none", type=str.lower, help='Rename files based on given scheme (default: %(default)s)')
+ parser.add_argument('--tag-metadata', nargs='?', const=1, default=True, type=bool, help='Use metadata as default tags (default: %(default)s)')
parser.add_argument('--predict-images', nargs='?', const=1, default=False, type=bool, help='Use prediction for image tagging (default: %(default)s)')
parser.add_argument('--predict-images-backend', nargs='?', const=1, choices=["torch", "tensorflow", "keras"], default="torch", type=str.lower, help='Determines which backend should be used for keyword prediction (default: %(default)s)')
parser.add_argument('--predict-images-top', nargs='?', const=1, default=10, type=int, help='Defines how many top prediction keywords should be used (default: %(default)s)')
parser.add_argument('--predict-images-detail-factor', nargs='?', const=1, default=2, type=int, help='Width factor for detail scan, multiplied by 224 for ResNet50 (default: %(default)s)')
parser.add_argument('--predict-images-skip-detail', nargs='?', const=1, default=False, type=bool, help='Skip detail scan in image prediction (default: %(default)s)')
+ parser.add_argument('--predict-videos', nargs='?', const=1, default=False, type=bool, help='Use prediction for video tagging (default: %(default)s)')
+ parser.add_argument('--predict-videos-key-frames', nargs='?', const=1, default=5, type=int, help='Defines how many key frames are used to predict videos (default: %(default)s)')
parser.add_argument('--gui-tag', nargs='?', const=1, default=False, type=bool, help='Show GUI for tagging (default: %(default)s)')
parser.add_argument('--gui-image-length', nargs='?', const=1, default=800, type=int, help='Length of longest side for preview (default: %(default)s)')
parser.add_argument('--open-system', nargs='?', const=1, default=False, type=bool, help='Open all files with system default (default: %(default)s)')
@@ -135,11 +216,15 @@ if __name__ == "__main__":
"file_dir": args.file_dir,
"gui": args.gui,
"tmsu_command": args.tmsu_command,
+ "rename": args.rename,
+ "tag_metadata": args.tag_metadata,
"predict_images": args.predict_images,
"predict_images_backend": args.predict_images_backend,
"predict_images_top": args.predict_images_top,
"predict_images_detail_factor": args.predict_images_detail_factor,
"predict_images_skip_detail": args.predict_images_skip_detail,
+ "predict_videos": args.predict_videos,
+ "predict_videos_key_frames": args.predict_videos_key_frames,
"gui_tag": args.gui_tag,
"gui_image_length": args.gui_image_length,
"open_system": args.open_system,