@@ -46,7 +46,7 @@
import settings as settings_file

# Version of the script
-script_version = (2, 5, 29)
+script_version = (2, 5, 30)
script_version_text = "v{}.{}.{}".format(*script_version)

# Paths = existing library
@@ -163,6 +163,7 @@
    ".epub",
]

+# Rar extensions
rar_extensions = [".rar", ".cbr"]

# Accepted file extensions for novels
@@ -323,7 +324,7 @@ def __str__(self):
    r"Tail -",
    r"꞉",
    r":",
-    r"\d\."
+    r"\d\.",
]

subtitle_exclusion_keywords = [r"-(\s)", r"-", r"-\s[A-Za-z]+\s"]
@@ -349,13 +350,13 @@ def __str__(self):

### EXTENION REGEX ###
# File extensions regex to be used throughout the script
-file_extensions_regex = "|".join(file_extensions).replace(".", "\.")
+file_extensions_regex = "|".join(file_extensions).replace(".", r"\.")
# Manga extensions regex to be used throughout the script
-manga_extensions_regex = "|".join(manga_extensions).replace(".", "\.")
+manga_extensions_regex = "|".join(manga_extensions).replace(".", r"\.")
# Novel extensions regex to be used throughout the script
-novel_extensions_regex = "|".join(novel_extensions).replace(".", "\.")
+novel_extensions_regex = "|".join(novel_extensions).replace(".", r"\.")
# Image extensions regex to be used throughout the script
-image_extensions_regex = "|".join(image_extensions).replace(".", "\.")
+image_extensions_regex = "|".join(image_extensions).replace(".", r"\.")

# REMINDER: ORDER IS IMPORTANT, Top to bottom is the order it will be checked in.
# Once a match is found, it will stop checking the rest.
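A note on the raw-string change: "\." and r"\." produce the same two-character string, but the non-raw literal contains an invalid escape sequence, which CPython has reported as a DeprecationWarning since 3.6 and as a SyntaxWarning from 3.12. The sketch below rebuilds one of these patterns, with a stand-in list used in place of the script's manga_extensions:

    import re

    # Stand-in for the script's manga_extensions list (values here are illustrative).
    manga_extensions = [".cbz", ".zip"]

    # Same construction as above: escape each dot so it matches a literal period.
    manga_extensions_regex = "|".join(manga_extensions).replace(".", r"\.")

    print(manga_extensions_regex)  # \.cbz|\.zip
    print(bool(re.search(manga_extensions_regex, "Series v01.cbz", re.IGNORECASE)))  # True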
@@ -2595,7 +2596,7 @@ def get_series_name_from_volume(name, root, test_mode=False, second=False):
    if is_one_shot(name, root, test_mode=test_mode):
        name = re.sub(
            r"([-_ ]+|)(((\[|\(|\{).*(\]|\)|\}))|LN)([-_. ]+|)(%s|).*"
-            % file_extensions_regex.replace("\.", ""),
+            % file_extensions_regex.replace(r"\.", ""),
            "",
            name,
            flags=re.IGNORECASE,
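For reference, the replace call above strips the escaping from the prebuilt extensions regex so only bare extension names are interpolated into the larger pattern; since r"\." is the same string the old literal produced, behavior is unchanged. A small illustration with a simplified stand-in for file_extensions_regex:

    # Simplified stand-in for the script's escaped-extension regex.
    file_extensions_regex = r"\.cbz|\.cbr|\.epub"

    # Removing the escaped dots leaves bare extension names: cbz|cbr|epub
    print(file_extensions_regex.replace(r"\.", ""))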
@@ -3345,8 +3346,12 @@ def is_first_image_black_and_white(zip_path):
# Return the number of image files in the .cbz archive.
def count_images_in_cbz(file_path):
    try:
-        with zipfile.ZipFile(file_path, 'r') as archive:
-            images = [f for f in archive.namelist() if f.lower().endswith(('.jpg', '.jpeg', '.png', '.gif', '.webp'))]
+        with zipfile.ZipFile(file_path, "r") as archive:
+            images = [
+                f
+                for f in archive.namelist()
+                if f.lower().endswith(tuple(image_extensions))
+            ]
            return len(images)
    except zipfile.BadZipFile:
        send_message(f"Skipping corrupted file: {file_path}", error=True)
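The rewrite above leans on str.endswith accepting a tuple of suffixes, which lets the hard-coded image suffixes be swapped for the script's shared image_extensions list. A quick self-contained sketch (the list values are placeholders):

    # Placeholder values standing in for the script's image_extensions list.
    image_extensions = [".jpg", ".jpeg", ".png", ".webp"]

    names = ["001.JPG", "002.png", "ComicInfo.xml"]

    # endswith() takes a tuple, so one call checks every suffix at once.
    images = [n for n in names if n.lower().endswith(tuple(image_extensions))]
    print(images)  # ['001.JPG', '002.png']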
@@ -3471,13 +3476,18 @@ def upgrade_to_volume_class(
        and file_obj.extension in manga_extensions
        and file_obj.file_type != "chapter"
        and (not file_obj.volume_number or file_obj.is_one_shot)
-        and (check_for_exception_keywords(file_obj.name, exception_keywords) or file_obj.is_one_shot)
+        and (
+            check_for_exception_keywords(file_obj.name, exception_keywords)
+            or file_obj.is_one_shot
+        )
    ):
-        if is_first_image_black_and_white(file_obj.path) or count_images_in_cbz(file_obj.path) <= average_chapter_image_count:
+        if (
+            is_first_image_black_and_white(file_obj.path)
+            or count_images_in_cbz(file_obj.path) <= average_chapter_image_count
+        ):
            file_obj.file_type = "chapter"
            file_obj.is_one_shot = True

-
    if file_obj.is_one_shot:
        file_obj.volume_number = 1
@@ -4880,19 +4890,19 @@ def normalize_str(
        "kara",
        "to",
        "ya",
-        "no(?!\.)",
+        r"no(?!\.)",
        "ne",
        "yo",
    ]
    words_to_remove.extend(japanese_particles)

    if not skip_misc_words:
-        misc_words = ["((\d+)([-_. ]+)?th)", "x", "×", "HD"]
+        misc_words = [r"((\d+)([-_. ]+)?th)", "x", "×", "HD"]
        words_to_remove.extend(misc_words)

    if not skip_storefront_keywords:
        storefront_keywords = [
-            "Book(\s+)?walker",
+            r"Book(\s+)?walker",
        ]
        words_to_remove.extend(storefront_keywords)
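One particle above keeps a negative lookahead, no(?!\.), so "no" is only removed when it is not followed by a period (presumably so strings like "No. 5" survive normalization). A minimal check of that behavior, with the surrounding word-boundary handling simplified; the sample titles are made up:

    import re

    # Simplified pattern; normalize_str builds a larger expression from words_to_remove.
    pattern = re.compile(r"\bno(?!\.)\b", flags=re.IGNORECASE)

    print(pattern.sub("", "Boku no Hero"))     # 'Boku  Hero' - particle removed
    print(pattern.sub("", "Operation No. 5"))  # unchanged - 'No.' is protected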
@@ -11754,11 +11764,11 @@ def move_series_to_correct_library(paths_to_search=paths_with_types):
# Normalize path separators and remove Windows drive letters if present.
def normalize_path(path):
    path = os.path.normpath(path)
-
+
    # Remove Windows drive letters (e.g., "Z:\example\path" -> "\example\path")
    if ":" in path:
        path = re.sub(r"^[A-Za-z]:", "", path)
-
+
    # Convert backslashes to forward slashes for uniform comparison
    return path.replace("\\", "/")
@@ -11767,7 +11777,7 @@ def normalize_path(path):
def is_root_present(root_path, target_path):
    root_path = normalize_path(root_path)
    target_path = normalize_path(target_path)
-
+
    return root_path in target_path
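As a usage sketch of the two helpers in this hunk (bodies restated from the diff so the snippet runs on its own; the example paths are made up), a Windows drive path and a POSIX path reduce to comparable forms:

    import os
    import re

    def normalize_path(path):
        path = os.path.normpath(path)
        # Strip a Windows drive letter, then unify separators.
        if ":" in path:
            path = re.sub(r"^[A-Za-z]:", "", path)
        return path.replace("\\", "/")

    def is_root_present(root_path, target_path):
        root_path = normalize_path(root_path)
        target_path = normalize_path(target_path)
        return root_path in target_path

    print(normalize_path(r"Z:\manga\Series Name"))                      # /manga/Series Name
    print(is_root_present("/manga", "/manga/Series Name/Vol. 01.cbz"))  # True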