@@ -46,7 +46,7 @@
import settings as settings_file

# Version of the script
- script_version = (2, 5, 19)
+ script_version = (2, 5, 20)
script_version_text = "v{}.{}.{}".format(*script_version)

# Paths = existing library
@@ -1730,7 +1730,7 @@ def handle_fields(embed, fields):


# Handles picking a webhook url, to evenly distribute the load
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=10)
def pick_webhook(hook, passed_webhook=None, url=None):
    global last_hook_index

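This hunk, and the ones that follow, swap unbounded @lru_cache(maxsize=None) decorators for bounded caches (10 entries for the webhook picker, 3500 for the string-parsing helpers), capping how much memory the caches can consume at the cost of occasional evictions. A minimal sketch of the difference, using a hypothetical helper rather than any of the repo's own functions:

from functools import lru_cache

# Bounded cache: once 3500 distinct arguments have been seen, the least
# recently used entry is evicted instead of the cache growing forever
# (maxsize=None would keep every entry for the life of the process).
@lru_cache(maxsize=3500)
def parse_name(name):  # hypothetical stand-in for the cached helpers
    return name.lower().strip()

for i in range(10_000):
    parse_name(f"Series Name v{i:03}.cbz")

print(parse_name.cache_info())  # currsize never exceeds 3500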
@@ -1853,7 +1853,7 @@ def ends_with_bracket(s):


# check if volume file name is a chapter
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def contains_chapter_keywords(file_name):
    # Replace "_extra"
    file_name_clean = file_name.replace("_extra", ".5")
@@ -1925,7 +1925,7 @@ def contains_brackets(s):

# Removes bracketed content from the string, alongwith any whitespace.
# As long as the bracketed content is not immediately preceded or followed by a dash.
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def remove_brackets(string):
    # Avoid a string that is only a bracket
    # Probably a series name
@@ -1981,7 +1981,7 @@ def remove_brackets(string):


# Checks if the passed string contains volume keywords
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def contains_volume_keywords(file):
    # Replace _extra
    file = file.replace("_extra", ".5")
@@ -2344,7 +2344,7 @@ def get_novel_cover(novel_path):


# Checks if the passed string is a volume one.
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def is_volume_one(volume_name):
    keywords = volume_regex_keywords

@@ -2390,7 +2390,7 @@ def is_one_shot(file_name, root=None, skip_folder_check=False, test_mode=False):


# Checks similarity between two strings.
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def similar(a, b):
    # convert to lowercase and strip
    a = a.lower().strip()
@@ -2564,7 +2564,7 @@ def contains_keyword(file_string, chapter=False):

# Retrieves the series name through various regexes
# Removes the volume number and anything to the right of it, and strips it.
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def get_series_name_from_volume(name, root, test_mode=False, second=False):
    # Remove starting brackets
    # EX: "[WN] Series Name" -> "Series Name"
@@ -2654,7 +2654,7 @@ def get_series_name_from_volume(name, root, test_mode=False, second=False):


# Cleans the chapter file_name to retrieve the series_name
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def chapter_file_name_cleaning(
    file_name, chapter_number="", skip=False, regex_matched=False
):
@@ -2833,7 +2833,7 @@ def get_folder_type(files, extensions=None, file_type=None):
# Determines if a volume file is a multi-volume file or not
# EX: TRUE == series_title v01-03.cbz
# EX: FALSE == series_title v01.cbz
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def check_for_multi_volume_file(file_name, chapter=False):
    # Set the list of keywords to search for
    keywords = volume_regex_keywords if not chapter else chapter_regex_keywords + "|"
@@ -2915,7 +2915,7 @@ def contains_non_numeric(input_string):


# Finds the volume/chapter number(s) in the file name.
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def get_release_number(file, chapter=False):

    # Cleans up the chapter's series name
@@ -3196,7 +3196,7 @@ def get_extra_from_group(


# Retrieves and returns the file part from the file name
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def get_file_part(file, chapter=False, series_name=None, subtitle=None):
    result = ""

@@ -4693,7 +4693,7 @@ def reorganize_and_rename(files, dir):


# Replaces any pesky double spaces
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def remove_dual_space(s):
    if "  " not in s:
        return s
@@ -4704,7 +4704,7 @@ def remove_dual_space(s):
# Removes common words to improve string matching accuracy between a series_name
# from a file name, and a folder name, useful for when releasers sometimes include them,
# and sometimes don't.
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def normalize_str(
    s,
    skip_common_words=False,
@@ -4807,7 +4807,7 @@ def normalize_str(


# Removes the s from any words that end in s
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def remove_s(s):
    return re.sub(r"\b(\w+)(s)\b", r"\1", s, flags=re.IGNORECASE).strip()

@@ -4822,14 +4822,14 @@ def contains_punctuation(s):


# Returns a string without punctuation.
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def remove_punctuation(s):
    return re.sub(r"[^\w\s+]", " ", s).strip()


# Cleans the string by removing punctuation, bracketed info, and replacing underscores with periods.
# Converts the string to lowercase and removes leading/trailing whitespace.
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def clean_str(
    string,
    skip_lowercase_convert=False,
@@ -5041,7 +5041,7 @@ def create_folders_for_items_in_download_folder():


# convert string to acsii
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def convert_to_ascii(s):
    return "".join(i for i in s if ord(i) < 128)

@@ -5475,7 +5475,7 @@ def remove_duplicates(items):

# Return the zip comment for the passed zip file (cached)
# Used on existing library files.
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def get_zip_comment_cache(zip_file):
    comment = ""
    try:
@@ -5780,7 +5780,7 @@ def check_for_duplicate_volumes(paths_to_search=[]):


# Regex out underscore from passed string and return it
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def replace_underscores(name):
    # Replace underscores that are preceded and followed by a number with a period
    name = re.sub(r"(?<=\d)_(?=\d)", ".", name)
@@ -5849,7 +5849,7 @@ def get_identifiers(zip_comment):

# Parses the individual words from the passed string and returns them as an array
# without punctuation, unidecoded, and in lowercase.
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def parse_words(user_string):
    words = []
    if user_string:
@@ -5869,7 +5869,7 @@ def parse_words(user_string):


# Finds a number of consecutive items in both arrays, or returns False if none are found.
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def find_consecutive_items(arr1, arr2, count=3):
    if len(arr1) < count or len(arr2) < count:
        return False
@@ -7415,7 +7415,7 @@ def isint(x):


# check if zip file contains ComicInfo.xml
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def contains_comic_info(zip_file):
    result = False
    try:
@@ -8436,7 +8436,7 @@ def is_blank_image(image_data):


# Returns the highest volume number and volume part number of a release in a list of volume releases
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def get_highest_release(releases, is_chapter_directory=False):
    highest_num = ""

@@ -9450,7 +9450,7 @@ def get_subtitle_from_dash(title, replace=False):
# Extracts the subtitle from a file.name
# (year required in brackets at the end of the subtitle)
# EX: Sword Art Online v13 - Alicization Dividing [2018].epub -> Alicization Dividing
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def get_subtitle_from_title(file, publisher=None):
    subtitle = ""

@@ -10853,7 +10853,7 @@ def has_one_set_of_numbers(string, chapter=False, file=None, subtitle=None):


# Check if there is more than one set of numbers in the string
- @lru_cache(maxsize=None)
+ @lru_cache(maxsize=3500)
def has_multiple_numbers(file_name):
    return len(re.findall(r"\d+\.0+[1-9]+|\d+\.[1-9]+|\d+", file_name)) > 1

@@ -10952,7 +10952,7 @@ def prep_images_for_similarity(
    blank_image_path, internal_cover_data, both_cover_data=False, silent=False
):

-     def resize_images(img1, img2, desired_width=600, desired_height=400):
+     def resize_images(img1, img2, desired_width=400, desired_height=600):
        img1_resized = cv2.resize(
            img1, (desired_width, desired_height), interpolation=cv2.INTER_AREA
        )
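A note on the hunk above: cv2.resize takes its target size as a (width, height) tuple, so swapping the defaults from 600x400 to 400x600 changes the comparison images from landscape to portrait, which presumably better matches book and comic cover proportions. A minimal sketch of the argument order (array shapes assumed for illustration, not repo code):

import cv2
import numpy as np

# NumPy reports shape as (height, width, channels), while cv2.resize
# expects the target size as (width, height).
img = np.zeros((1200, 800, 3), dtype=np.uint8)  # 800 px wide, 1200 px tall
resized = cv2.resize(img, (400, 600), interpolation=cv2.INTER_AREA)
print(resized.shape)  # (600, 400, 3): 600 rows tall, 400 columns wide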
@@ -11414,7 +11414,7 @@ def correct_file_extensions():
            print("\t\t\tSkipped")


- # Checks existing series within existing libraries to see if their type matche sthe library they're in
+ # Checks existing series within existing libraries to see if their type matches the library they're in
# If not, it moves the series to the appropriate library
def move_series_to_correct_library(paths_to_search=paths_with_types):
    global grouped_notifications
@@ -11435,6 +11435,7 @@ def move_series_to_correct_library(paths_to_search=paths_with_types):
        print(f"\nSearching {p.path} for incorrectly matching series types...")
        for root, dirs, files in scandir.walk(p.path):
            print(f"\t{root}")
+
            files, dirs = process_files_and_folders(
                root,
                files,