diff --git a/README.md b/README.md
index 31d20f6..d325dd6 100644
--- a/README.md
+++ b/README.md
@@ -263,12 +263,12 @@ The default name format is: `"{Callsign} {Nearest City} {Landmark}"`
 
 # Bonus: Trim the Contact List
 
-Download the usersDB.bin with Farnsworth editcp.
+For MD-UV380 only.
 
 python -m dzcb.contact_trim \
-    < "~/.cache/codeplug/Codeplug Editor/usersDB.bin" \
-    > "~/.cache/codeplug/Codeplug Editor/usersDB-trimmed.bin"
+    "~/.cache/codeplug/Codeplug Editor/usersDB.bin"
 
+Write the database with editcp (do NOT check Download New)
 
 # Basic Usage
 
diff --git a/src/dzcb/contacts_trim.py b/src/dzcb/contacts_trim.py
index bc3fb1e..f9afd09 100644
--- a/src/dzcb/contacts_trim.py
+++ b/src/dzcb/contacts_trim.py
@@ -1,57 +1,68 @@
 """
 dzcb.contacts_trim - remove contacts to get under radio limits
 """
+import argparse
+import csv
+import json
+from pathlib import Path
+import time
 import sys
 
-remove_suffixes = [
-    ",",
-    ",GR",
-    ",CY",
-    ",CN",
-    ",BE",
-    ",FR",
-    ",ES",
-    ",IT",
-    ",RU",
-    ",PL",
-    ",DE",
-    ",PT",
-    ",TR",
-    ",SI",
-    ",JP",
-    ",Korea Republic of",
-    ",PH",
-    ",MY",
-    ",TH",
-    ",AR",
-    ",BR",
-    ",CL",
-    ",CO",
-    ",VE",
-    ",UY",
-    ",SE",
-    ",CH",
-    ",CZ",
-    ",SK",
-    "BA",
-    "HR",
-    "AT",
-]
+import requests
+
+from dzcb import appdir
+
+RADIO_ID_USERS_JSON = "https://database.radioid.net/static/users.json"
+RADIO_ID_USERS_MAX_AGE = 3600 * 12.1
+
 total = 0
 
-def check_suffix(line):
-    for s in remove_suffixes:
-        if line.strip().endswith(s):
-            return None
-    return line
+def cached_json(url, max_age=RADIO_ID_USERS_MAX_AGE):
+    cachedir = Path(appdir.user_cache_dir)
+    filepath = cachedir / "usersdb.json"
+    if not filepath.exists() or filepath.stat().st_mtime < time.time() - max_age:
+        # cache is expired, need to refetch
+        cachedir.mkdir(parents=True, exist_ok=True)
+        resp = requests.get(url)
+        filepath.write_bytes(resp.content)
+    return filepath
+
+
+def group_users_by(field_name):
+    groups = {}
+    with open(cached_json(RADIO_ID_USERS_JSON)) as f:
+        db = json.load(f)
+    for user in db["users"]:
+        groups.setdefault(user[field_name].lower(), []).append(user)
+    return groups
+
+
+def flatten_groups(groups):
+    users = []
+    for ulist in groups.values():
+        users.extend(ulist)
+    return users
+
+def users_to_md_uv380_csv(users, output):
+    fields = ["radio_id", "callsign", "fname", "city", "state", "remarks", "country"]
+    with open(output, "w", newline="") as out:
+        csvw = csv.DictWriter(out, fieldnames=fields, extrasaction="ignore")
+        for u in users:
+            u["fname"] = u["fname"].partition(" ")[0].capitalize()
+            u["city"] = u["city"].capitalize()
+            u["state"] = u["state"].capitalize()
+            u["remarks"] = ""
+            csvw.writerow(u)
 
-for line in sys.stdin:
-    outline = check_suffix(line)
-    if outline:
-        total += 1
-        sys.stdout.write(outline)
-sys.stderr.write(f"Wrote {total} records")
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("output_file")
+    args = parser.parse_args()
+    users_by_country = group_users_by("country")
+    filtered = users_by_country["united states"] + users_by_country["canada"]
+    users_to_md_uv380_csv(filtered, args.output_file)
+    sys.stderr.write(f"Wrote {len(filtered)} records")