From f110bf51f16633ed7559073f8487e12bf423d583 Mon Sep 17 00:00:00 2001 From: Jonathan Otto <76476352+JonaOtto@users.noreply.github.com> Date: Fri, 9 Sep 2022 19:30:57 +0200 Subject: [PATCH 01/48] Wait_finished method for job API (regarding #240) (#242) * Fix introduced typo in partition information dictionary key. (#241) * Added wait_finished method to job class (#240). * Added test method for wait_finished method of the job class. * Added _load_single_job method to the job class to extract the slurm_load_job functionality. * Updated find_id and wait_finished to use _load_single_job. Co-authored-by: Jonathan Goodson --- pyslurm/pyslurm.pyx | 86 ++++++++++++++++++++++++++++++++++++--------- tests/test_job.py | 76 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 145 insertions(+), 17 deletions(-) diff --git a/pyslurm/pyslurm.pyx b/pyslurm/pyslurm.pyx index 9e4a0151..16c72dc9 100644 --- a/pyslurm/pyslurm.pyx +++ b/pyslurm/pyslurm.pyx @@ -598,7 +598,7 @@ cdef class config: Ctl_dict['cpu_freq_govs'] = self.__Config_ptr.cpu_freq_govs Ctl_dict['cred_type'] = slurm.stringOrNone(self.__Config_ptr.cred_type, '') Ctl_dict['debug_flags'] = self.__Config_ptr.debug_flags - Ctl_dict['def_mem_per_cp'] = self.__Config_ptr.def_mem_per_cpu + Ctl_dict['def_mem_per_cpu'] = self.__Config_ptr.def_mem_per_cpu Ctl_dict['dependency_params'] = slurm.stringOrNone(self.__Config_ptr.dependency_params, '') Ctl_dict['eio_timeout'] = self.__Config_ptr.eio_timeout Ctl_dict['enforce_part_limits'] = bool(self.__Config_ptr.enforce_part_limits) @@ -1023,16 +1023,16 @@ cdef class partition: if record.def_mem_per_cpu & slurm.MEM_PER_CPU: if record.def_mem_per_cpu == slurm.MEM_PER_CPU: - Part_dict['def_mem_per_cp'] = "UNLIMITED" + Part_dict['def_mem_per_cpu'] = "UNLIMITED" Part_dict['def_mem_per_node'] = None else: - Part_dict['def_mem_per_cp'] = record.def_mem_per_cpu & (~slurm.MEM_PER_CPU) + Part_dict['def_mem_per_cpu'] = record.def_mem_per_cpu & (~slurm.MEM_PER_CPU) 
Part_dict['def_mem_per_node'] = None elif record.def_mem_per_cpu == 0: - Part_dict['def_mem_per_cp'] = None + Part_dict['def_mem_per_cpu'] = None Part_dict['def_mem_per_node'] = "UNLIMITED" else: - Part_dict['def_mem_per_cp'] = None + Part_dict['def_mem_per_cpu'] = None Part_dict['def_mem_per_node'] = record.def_mem_per_cpu if record.default_time == slurm.INFINITE: @@ -1774,35 +1774,55 @@ cdef class job: return retList - def find_id(self, jobid): - """Retrieve job ID data. + cdef _load_single_job(self, jobid): + """ + Uses slurm_load_job to setup the self._job_ptr for a single job given by the jobid. + After calling this, the job pointer can be used in other methods + to operate on the informations of the job. - This method accepts both string and integer formats of the jobid. It - calls slurm_xlate_job_id() to convert the jobid appropriately. - This works for single jobs and job arrays. + This method accepts both string and integer formate of the jobid. It + calls slurm_xlate_job_id to convert the jobid appropriately. - :param str jobid: Job id key string to search - :returns: List of dictionary of values for given job id - :rtype: `list` + Raises an value error if the jobid does not correspond to a existing job. + + :param str jobid: The jobid + :returns: void + :rtype: None. """ cdef: int apiError int rc + # jobid can be given as int or string if isinstance(jobid, int) or isinstance(jobid, long): jobid = str(jobid).encode("UTF-8") else: jobid = jobid.encode("UTF-8") - + # convert jobid appropriately for slurm jobid_xlate = slurm.slurm_xlate_job_id(jobid) + + # load the job which sets the self._job_ptr pointer rc = slurm.slurm_load_job(&self._job_ptr, jobid_xlate, self._ShowFlags) - if rc == slurm.SLURM_SUCCESS: - return list(self.get_job_ptr().values()) - else: + if rc != slurm.SLURM_SUCCESS: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) + def find_id(self, jobid): + """Retrieve job ID data. 
+ + This method accepts both string and integer formats of the jobid. + This works for single jobs and job arrays. It uses the internal + helper _load_single_job to do slurm_load_job. If the job corresponding + to the jobid does not exist, a ValueError will be raised. + + :param str jobid: Job id key string to search + :returns: List of dictionary of values for given job id + :rtype: `list` + """ + self._load_single_job(jobid) + return list(self.get_job_ptr().values()) + def find_user(self, user): """Retrieve a user's job data. @@ -2879,6 +2899,38 @@ cdef class job: #return "Submitted batch job %s" % job_id return job_id + def wait_finished(self, jobid): + """ + Block until the job given by the jobid finishes. + This works for single jobs, as well as job arrays. + :param jobid: The job id of the slurm job. + To reference a job with job array set, use the first/"master" jobid + (the same as given by squeue) + :returns: The exit code of the slurm job. + :rtype: `int` + """ + exit_status = -9999 + complete = False + while not complete: + complete = True + p_time.sleep(5) + self._load_single_job(jobid) + for i in range(0, self._job_ptr.record_count): + self._record = &self._job_ptr.job_array[i] + if IS_JOB_COMPLETED(self._job_ptr.job_array[i]): + exit_status_arrayjob = None + if WIFEXITED(self._record.exit_code): + exit_status_arrayjob = WEXITSTATUS(self._record.exit_code) + else: + exit_status_arrayjob = 1 + # set exit code to the highest of all jobs in job array + exit_status = max([exit_status, exit_status_arrayjob]) + else: + # go on with the next interation, unil all jobs in array are completed + complete = False + slurm.slurm_free_job_info_msg(self._job_ptr) + return exit_status + def slurm_pid2jobid(uint32_t JobPID=0): """Get the slurm job id from a process id. 
diff --git a/tests/test_job.py b/tests/test_job.py index efb16c09..e11bb65c 100644 --- a/tests/test_job.py +++ b/tests/test_job.py @@ -110,3 +110,79 @@ def test_job_kill(): # time.sleep(3) # test_job_search_after = pyslurm.job().find_id(test_job_id)[0] # assert_equals(test_job_search_after.get("job_state"), "FAILED") + + +def test_job_wait_finished(): + """Job: Test job().wait_finished().""" + test_job = { + "wrap": "sleep 30", + "job_name": "pyslurm_test_job", + "ntasks": 1, + "cpus_per_task": 1, + } + test_job_id = pyslurm.job().submit_batch_job(test_job) + start_job_state = pyslurm.job().find_id(test_job_id)[0]["job_state"] + + # wait for the job to finish + exit_code = pyslurm.job().wait_finished(test_job_id) + + end_job_state = pyslurm.job().find_id(test_job_id)[0]["job_state"] + assert start_job_state != "COMPLETED" + assert end_job_state == "COMPLETED" + assert exit_code == 0 + + # test again with another wrap + test_job = { + "wrap": "sleep 300; exit 1", # "exit 1" should yield failure ending + "job_name": "pyslurm_test_job", + "ntasks": 1, + "cpus_per_task": 1, + } + test_job_id = pyslurm.job().submit_batch_job(test_job) + start_job_state = pyslurm.job().find_id(test_job_id)[0]["job_state"] + + # wait for the job to finish + exit_code = pyslurm.job().wait_finished(test_job_id) + + end_job_state = pyslurm.job().find_id(test_job_id)[0]["job_state"] + assert start_job_state != "COMPLETED" + assert end_job_state == "FAILED" + assert exit_code == 1 + + +def test_job_wait_finished_w_arrays(): + """Job: Test job().wait_finished() with job arrays.""" + test_job = { + "wrap": "sleep 30; exit 0", + "job_name": "pyslurm_array_test_job", + "ntasks": 1, + "cpus_per_task": 1, + "array_inx": "0,1,2", + } + test_job_id = pyslurm.job().submit_batch_job(test_job) + start_job_state = pyslurm.job().find_id(test_job_id)[0]["job_state"] + # wait for the job to finish + exit_code = pyslurm.job().wait_finished(test_job_id) + end_job_state = 
pyslurm.job().find_id(test_job_id)[0]["job_state"] + assert start_job_state != "COMPLETED" + assert end_job_state == "COMPLETED" + assert exit_code == 0 + + # test for exit codes: maximum exit code of all array jobs + test_job = { + # use array ID as exit code to yield different exit codes: 0, 1, 2 + "wrap": "sleep 30; exit $SLURM_ARRAY_TASK_ID", + "job_name": "pyslurm_array_test_job", + "ntasks": 1, + "cpus_per_task": 1, + "array_inx": "0,1,2", + } + test_job_id = pyslurm.job().submit_batch_job(test_job) + start_job_state = pyslurm.job().find_id(test_job_id)[0]["job_state"] + # wait for the job to finish + exit_code = pyslurm.job().wait_finished(test_job_id) + end_job_state = pyslurm.job().find_id(test_job_id)[0]["job_state"] + assert start_job_state != "COMPLETED" + # exit code 2 (the maximum of all) should yield FAILED for the entire job + assert end_job_state == "FAILED" + assert exit_code == 2 From f60220cf63a17fd386ae8eab161319cfa19fbac7 Mon Sep 17 00:00:00 2001 From: Jonathan Otto <76476352+JonaOtto@users.noreply.github.com> Date: Sat, 10 Sep 2022 17:13:18 +0200 Subject: [PATCH 02/48] Fixed mem_per_cpu setting in job class (#243) * Fixed mem_per_cpu setting. * Fixed settings with mem_per_cp mentions. 
--- pyslurm/pyslurm.pyx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/pyslurm/pyslurm.pyx b/pyslurm/pyslurm.pyx index 16c72dc9..78465587 100644 --- a/pyslurm/pyslurm.pyx +++ b/pyslurm/pyslurm.pyx @@ -655,7 +655,7 @@ cdef class config: Ctl_dict['max_dbd_msgs'] = self.__Config_ptr.max_dbd_msgs Ctl_dict['max_job_cnt'] = self.__Config_ptr.max_job_cnt Ctl_dict['max_job_id'] = self.__Config_ptr.max_job_id - Ctl_dict['max_mem_per_cp'] = self.__Config_ptr.max_mem_per_cpu + Ctl_dict['max_mem_per_cpu'] = self.__Config_ptr.max_mem_per_cpu Ctl_dict['max_step_cnt'] = self.__Config_ptr.max_step_cnt Ctl_dict['max_tasks_per_node'] = self.__Config_ptr.max_tasks_per_node Ctl_dict['min_job_age'] = self.__Config_ptr.min_job_age @@ -1058,16 +1058,16 @@ cdef class partition: if record.max_mem_per_cpu & slurm.MEM_PER_CPU: if record.max_mem_per_cpu == slurm.MEM_PER_CPU: - Part_dict['max_mem_per_cp'] = "UNLIMITED" + Part_dict['max_mem_per_cpu'] = "UNLIMITED" Part_dict['max_mem_per_node'] = None else: - Part_dict['max_mem_per_cp'] = record.max_mem_per_cpu & (~slurm.MEM_PER_CPU) + Part_dict['max_mem_per_cpu'] = record.max_mem_per_cpu & (~slurm.MEM_PER_CPU) Part_dict['max_mem_per_node'] = None elif record.max_mem_per_cpu == 0: - Part_dict['max_mem_per_cp'] = None + Part_dict['max_mem_per_cpu'] = None Part_dict['max_mem_per_node'] = "UNLIMITED" else: - Part_dict['max_mem_per_cp'] = None + Part_dict['max_mem_per_cpu'] = None Part_dict['max_mem_per_node'] = record.max_mem_per_cpu if record.max_nodes == slurm.INFINITE: @@ -2064,12 +2064,12 @@ cdef class job: if self._record.pn_min_memory & slurm.MEM_PER_CPU: self._record.pn_min_memory &= (~slurm.MEM_PER_CPU) - Job_dict['mem_per_cp'] = True + Job_dict['mem_per_cpu'] = True Job_dict['min_memory_cp'] = self._record.pn_min_memory Job_dict['mem_per_node'] = False Job_dict['min_memory_node'] = None else: - Job_dict['mem_per_cp'] = False + Job_dict['mem_per_cpu'] = False Job_dict['min_memory_cp'] = None 
Job_dict['mem_per_node'] = True Job_dict['min_memory_node'] = self._record.pn_min_memory @@ -2517,8 +2517,8 @@ cdef class job: if job_opts.get("realmem"): desc.pn_min_memory = job_opts.get("realmem") - elif job_opts.get("mem_per_cp"): - desc.pn_min_memory = job_opts.get("mem_per_cp") | slurm.MEM_PER_CPU + elif job_opts.get("mem_per_cpu"): + desc.pn_min_memory = job_opts.get("mem_per_cpu") | slurm.MEM_PER_CPU if job_opts.get("tmpdisk"): desc.pn_min_tmp_disk = job_opts.get("tmpdisk") @@ -5281,10 +5281,10 @@ cdef class slurmdb_jobs: if job.req_mem & slurm.MEM_PER_CPU: JOBS_info['req_mem'] = job.req_mem & (~slurm.MEM_PER_CPU) - JOBS_info['req_mem_per_cp'] = True + JOBS_info['req_mem_per_cpu'] = True else: JOBS_info['req_mem'] = job.req_mem - JOBS_info['req_mem_per_cp'] = False + JOBS_info['req_mem_per_cpu'] = False JOBS_info['requid'] = job.requid JOBS_info['resvid'] = job.resvid From 7b370f724b6edd246dea4dd989ca5b79c430b449 Mon Sep 17 00:00:00 2001 From: schluenz Date: Fri, 23 Dec 2022 22:57:24 +0100 Subject: [PATCH 03/48] fix typos (#252) --- pyslurm/pyslurm.pyx | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pyslurm/pyslurm.pyx b/pyslurm/pyslurm.pyx index 78465587..bea119ea 100644 --- a/pyslurm/pyslurm.pyx +++ b/pyslurm/pyslurm.pyx @@ -2065,12 +2065,12 @@ cdef class job: if self._record.pn_min_memory & slurm.MEM_PER_CPU: self._record.pn_min_memory &= (~slurm.MEM_PER_CPU) Job_dict['mem_per_cpu'] = True - Job_dict['min_memory_cp'] = self._record.pn_min_memory + Job_dict['min_memory_cpu'] = self._record.pn_min_memory Job_dict['mem_per_node'] = False Job_dict['min_memory_node'] = None else: Job_dict['mem_per_cpu'] = False - Job_dict['min_memory_cp'] = None + Job_dict['min_memory_cpu'] = None Job_dict['mem_per_node'] = True Job_dict['min_memory_node'] = self._record.pn_min_memory @@ -5106,17 +5106,17 @@ cdef class qos: QOS_info['grp_tres_run_mins'] = slurm.stringOrNone(qos.grp_tres_run_mins, '') # QOS_info['grp_tres_run_mins_ctld'] 
QOS_info['grp_wall'] = qos.grp_wall - QOS_info['max_jobs_p'] = qos.max_jobs_pu - QOS_info['max_submit_jobs_p'] = qos.max_submit_jobs_pu + QOS_info['max_jobs_pu'] = qos.max_jobs_pu + QOS_info['max_submit_jobs_pu'] = qos.max_submit_jobs_pu QOS_info['max_tres_mins_pj'] = slurm.stringOrNone(qos.max_tres_mins_pj, '') # QOS_info['max_tres_min_pj_ctld'] QOS_info['max_tres_pj'] = slurm.stringOrNone(qos.max_tres_pj, '') # QOS_info['max_tres_min_pj_ctld'] QOS_info['max_tres_pn'] = slurm.stringOrNone(qos.max_tres_pn, '') # QOS_info['max_tres_min_pn_ctld'] - QOS_info['max_tres_p'] = slurm.stringOrNone(qos.max_tres_pu, '') + QOS_info['max_tres_pu'] = slurm.stringOrNone(qos.max_tres_pu, '') # QOS_info['max_tres_min_pu_ctld'] - QOS_info['max_tres_run_mins_p'] = slurm.stringOrNone( + QOS_info['max_tres_run_mins_pu'] = slurm.stringOrNone( qos.max_tres_run_mins_pu, '') QOS_info['max_wall_pj'] = qos.max_wall_pj From da978a6cf0db3e22c15bc7bfd3d95f0bab865386 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Tue, 3 Jan 2023 19:58:06 +0100 Subject: [PATCH 04/48] Fix creating RPM packages (#248) * update the build-script to allow parallel builds Well, doesn't really do much right now since it's just one file being compiled - but maybe it will matter in the future. Also make it so the "install" command doesn't actually build anything, and just installs. Update the README to promote usage of the build.sh script Also remove the installation instruction for PyPi - it's not up there yet. 
* fix bdist_rpm not including pyslurm.pyx Just do a recursive include of all .pyx .pxd .pxi .h files --- MANIFEST.in | 2 +- README.md | 12 +++--------- scripts/build.sh | 28 ++++++++++++++++++++-------- setup.cfg | 13 ++++--------- setup.py | 17 ++++++++++------- 5 files changed, 38 insertions(+), 34 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index d33c1276..bbe17e2a 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -5,4 +5,4 @@ graft tests graft doc graft pyslurm/slurm graft pyslurm/pydefines -include pyslurm/alps_cray.h +recursive-include pyslurm *.pyx *.px[di] *.h diff --git a/README.md b/README.md index 1bebb43e..413112a0 100644 --- a/README.md +++ b/README.md @@ -21,24 +21,18 @@ By default, it is searched inside `/usr/include` for the Header files and in For Slurm installations in different locations, you will need to provide the corresponding paths to the necessary files. -You can specify these Paths with environment variables, for example: +You can specify these Paths with environment variables (recommended), for example: ```shell export SLURM_INCLUDE_DIR=/opt/slurm/22.05/include export SLURM_LIB_DIR=/opt/slurm/22.05/lib ``` -Then you can proceed to install PySlurm, for example: - -```shell -pip install pyslurm==22.05.0 -``` - -Or by cloning the repository: +Then you can proceed to install PySlurm, for example by cloning the Repository: ```shell git clone https://github.com/PySlurm/pyslurm.git && cd pyslurm -python setup.py install +scripts/build.sh # Or simply with pip pip install . diff --git a/scripts/build.sh b/scripts/build.sh index 444fd108..b3e389c8 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -1,13 +1,25 @@ #!/bin/bash set -e -################################### -# Build PySlurm -################################### +usage() { echo "Usage: $0 [-j jobs]" 1>&2; exit 1; } -cd pyslurm -echo "---> Building PySlurm..." -python$PYTHON setup.py build +# Option to allow parallel build +OPT_JOBS=1 -echo "---> Installing PySlurm..." 
-python$PYTHON setup.py install +PYTHON_VERSION=3 + +while getopts ":j:" o; do + case "${o}" in + j) + OPT_JOBS=${OPTARG} + ;; + *) + usage + ;; + esac +done + +shift $((OPTIND-1)) + +python"$PYTHON_VERSION" setup.py build -j "$OPT_JOBS" +python"$PYTHON_VERSION" setup.py install diff --git a/setup.cfg b/setup.cfg index f23c586d..af6883db 100644 --- a/setup.cfg +++ b/setup.cfg @@ -5,17 +5,12 @@ docs=build_sphinx [bdist_rpm] release = 1 packager = Giovanni Torres -doc_files = CONTRIBUTORS.rst - README.rst - THANKS.rst +doc_files = README.md doc/ examples/ -build_requires = python-devel >= 2.7 - Cython >= 0.19 - python-sphinx >= 1.1 - slurm-devel >= 17.11.5 - python-nose -requires = slurm-slurmd slurm-slurmdbd +build_requires = python3-devel >= 3.6 + slurm-devel >= 22.05.0 +requires = slurm use_bzip2 = 1 [build_sphinx] diff --git a/setup.py b/setup.py index 4ddce583..5fd61c9a 100644 --- a/setup.py +++ b/setup.py @@ -300,9 +300,8 @@ def parse_setuppy_commands(): cleanup_build() return False - build_cmd = ('install', 'sdist', 'build', 'build_ext', 'build_py', - 'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm', - 'build_src', 'bdist_egg', 'develop') + build_cmd = ('build', 'build_ext', 'build_py', 'build_clib', + 'build_scripts', 'bdist_wheel', 'build_src', 'bdist_egg', 'develop') for cmd in build_cmd: if cmd in args: @@ -318,10 +317,14 @@ def setup_package(): build_it = parse_setuppy_commands() if build_it: - if "sdist" not in sys.argv: - parse_slurm_args() - slurm_sanity_checks() - cythongen() + parse_slurm_args() + slurm_sanity_checks() + cythongen() + + if "install" in sys.argv: + parse_slurm_args() + slurm_sanity_checks() + metadata["ext_modules"] = make_extensions() setup(**metadata) From a2fa6e1af0e6bb349f4e4c8dad19c75bee1bb9c2 Mon Sep 17 00:00:00 2001 From: Pablo Llopis Date: Fri, 6 Jan 2023 20:39:29 +0100 Subject: [PATCH 05/48] Support updating end_time in slurm_update_reservation (#255) --- pyslurm/pyslurm.pyx | 3 +++ 1 file changed, 3 insertions(+) 
diff --git a/pyslurm/pyslurm.pyx b/pyslurm/pyslurm.pyx index bea119ea..8cf4fd98 100644 --- a/pyslurm/pyslurm.pyx +++ b/pyslurm/pyslurm.pyx @@ -4314,6 +4314,9 @@ def slurm_update_reservation(dict reservation_dict={}): if time_value != -1: resv_msg.start_time = time_value + if reservation_dict.get('end_time'): + resv_msg.end_time = reservation_dict['end_time'] + if reservation_dict.get('duration'): resv_msg.duration = reservation_dict.get('duration') From ace7785076d9aa19992599459523914fb1667b07 Mon Sep 17 00:00:00 2001 From: Pablo Llopis Date: Fri, 6 Jan 2023 20:41:10 +0100 Subject: [PATCH 06/48] Fix formatting error for reservation_list example (#256) --- examples/reservation_list.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/reservation_list.py b/examples/reservation_list.py index 57e8398f..87e3a337 100755 --- a/examples/reservation_list.py +++ b/examples/reservation_list.py @@ -36,7 +36,7 @@ def display(res_dict): if res_value["start_time"] <= now <= res_value["end_time"]: resv_state = "ACTIVE" - print(f"\t{'state':-20s} : {resv_state}\n") + print(f"\t{'state':<20s} : {resv_state}\n") if __name__ == "__main__": From 2617092e195b7f7d8343c6986eca9ca1dcf73148 Mon Sep 17 00:00:00 2001 From: Pablo Llopis Date: Fri, 6 Jan 2023 20:46:47 +0100 Subject: [PATCH 07/48] Raise ValueError on slurm_update_reservation (#257) To be consistent with other functions and methods, we need to raise a ValueError instead of just returning the error code. This also allows client applications to more easily interpret the reason for the error. 
--- pyslurm/pyslurm.pyx | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyslurm/pyslurm.pyx b/pyslurm/pyslurm.pyx index 8cf4fd98..a9237b4b 100644 --- a/pyslurm/pyslurm.pyx +++ b/pyslurm/pyslurm.pyx @@ -4375,6 +4375,9 @@ def slurm_update_reservation(dict reservation_dict={}): resv_msg.flags = int_value errCode = slurm.slurm_update_reservation(&resv_msg) + if errCode != 0: + apiError = slurm.slurm_get_errno() + raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) return errCode From ffc419f4cfc85bfbdf6b4ff1979b767cacbf8c87 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Sat, 4 Feb 2023 23:58:20 +0100 Subject: [PATCH 08/48] Actually retrieve the batch script contents (#258) * Fixes "incomplete type" complaints cython always complaint about "incomplete type" for pthread_mutex_t and sockaddr_storage. They must be atleast defined with a "pass", which makes the complaint go away. Also reformat the imports a bit. * Actually return the batch-script contents as a string. Previously it was just printed to stdout, so it wasn't really useful when wanting to process the contents any further in a script for example. --- UPGRADE_C_API.rst | 7 +- pyslurm/pyslurm.pyx | 60 +++++++++++++++-- pyslurm/slurm/__init__.pxd | 45 ++++++++++--- pyslurm/slurm/extra.pxi | 128 +++++++++++++++++++++++++++++++++++++ 4 files changed, 220 insertions(+), 20 deletions(-) diff --git a/UPGRADE_C_API.rst b/UPGRADE_C_API.rst index 6e7c84ae..3a47b2ed 100644 --- a/UPGRADE_C_API.rst +++ b/UPGRADE_C_API.rst @@ -51,13 +51,8 @@ Then, simply generate the header definitions like in this example: scripts/pyslurm_bindgen.py -D /directoy/with/slurm/headers > pyslurm/slurm/header.pxi The script outputs everything to `stdout`. Simply redirect the output to the file: :code:`pyslurm/slurm/header.pxi`. +The headers should now be fully translated. -Now, 99% of the work is done for generating the headers. 
For the 1% left, you now need to open the generated file, search for the two follwowing statements and comment them out: - -- `slurm_addr_t control_addr` -- `phtread_mutex_t lock` - -The compiler will otherwise complain that these are incomplete type definitions. Compiling, Updating, Testing ---------------------------- diff --git a/pyslurm/pyslurm.pyx b/pyslurm/pyslurm.pyx index a9237b4b..0dd42c2c 100644 --- a/pyslurm/pyslurm.pyx +++ b/pyslurm/pyslurm.pyx @@ -2312,19 +2312,71 @@ cdef class job: def slurm_job_batch_script(self, jobid): """ - Retrieve the batch script for a given jobid. + Return the contents of the batch-script for a Job. - :param str jobid: Job id key string to search - :returns: String output of a jobid's batch script + Note: The string returned also includes all the "\n" characters + (new-line). + + :param jobid: ID of the Job for which the script should be retrieved. + :type jobid: Union[str, int] + :raises: [ValueError]: When retrieving the Batch-Script for the Job was + not successful. + :returns: The content of the batch script. :rtype: `str` """ + # This reimplements the slurm_job_batch_script API call. Otherwise we + # would have to parse the FILE* ptr we get from it back into a + # char* which would be a bit silly. 
Source: + # https://github.com/SchedMD/slurm/blob/7162f15af8deaf02c3bbf940d59e818cdeb5c69d/src/api/job_info.c#L1319 + cdef: + slurm.job_id_msg_t msg + slurm.slurm_msg_t req + slurm.slurm_msg_t resp + int rc = slurm.SLURM_SUCCESS + str script = None + if isinstance(jobid, int) or isinstance(jobid, long): jobid = str(jobid).encode("UTF-8") else: jobid = jobid.encode("UTF-8") jobid_xlate = slurm.slurm_xlate_job_id(jobid) - return slurm.slurm_job_batch_script(slurm.stdout, jobid_xlate) + + slurm.slurm_msg_t_init(&req) + slurm.slurm_msg_t_init(&resp) + + memset(&msg, 0, sizeof(msg)) + msg.job_id = jobid_xlate + req.msg_type = slurm.REQUEST_BATCH_SCRIPT + req.data = &msg + + rc = slurm.slurm_send_recv_controller_msg(&req, &resp, + slurm.working_cluster_rec) + if rc < 0: + err = slurm.slurm_get_errno() + raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(err), ''), + err) + + if resp.msg_type == slurm.RESPONSE_BATCH_SCRIPT: + script = slurm.stringOrNone(resp.data, None) + slurm.xfree(resp.data) + + elif resp.msg_type == slurm.RESPONSE_SLURM_RC: + rc = ( resp.data).return_code + slurm.slurm_free_return_code_msg(resp.data) + + if rc == slurm.SLURM_ERROR: + rc = slurm.slurm_get_errno() + + raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(rc), ''), + rc) + + else: + rc = slurm.slurm_get_errno() + raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(rc), ''), + rc) + + return script cdef int fill_job_desc_from_opts(self, dict job_opts, slurm.job_desc_msg_t *desc): """ diff --git a/pyslurm/slurm/__init__.pxd b/pyslurm/slurm/__init__.pxd index d779bb18..3c64b282 100644 --- a/pyslurm/slurm/__init__.pxd +++ b/pyslurm/slurm/__init__.pxd @@ -1,19 +1,37 @@ from libcpp cimport bool -from posix.unistd cimport uid_t, pid_t, gid_t -from libc.stdint cimport int8_t, int16_t, int32_t, int64_t -from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t from cpython.version cimport PY_MAJOR_VERSION -from libc.string cimport strlen, memcpy -cdef extern from "" nogil: - 
ctypedef struct sockaddr_in - ctypedef struct sockaddr_storage +from posix.unistd cimport ( + uid_t, + pid_t, + gid_t, +) + +from libc.stdint cimport ( + int8_t, + int16_t, + int32_t, + int64_t, + uint8_t, + uint16_t, + uint32_t, + uint64_t, +) + +from libc.string cimport ( + strlen, + memcpy, +) + +cdef extern from '' nogil: + ctypedef struct sockaddr_storage: + pass cdef extern from '' nogil: ctypedef struct FILE cdef FILE *stdout -cdef extern from 'time.h' nogil: +cdef extern from '' nogil: ctypedef long time_t double difftime(time_t time1, time_t time2) time_t time(time_t *t) @@ -24,8 +42,15 @@ cdef extern from '' nogil: cdef int __LINE__ char *__FUNCTION__ -cdef extern from "" nogil: - ctypedef union pthread_mutex_t +cdef extern from '' nogil: + ctypedef struct pthread_mutex_t: + pass + + ctypedef struct pthread_cond_t: + pass + + ctypedef struct pthread_t: + pass cdef extern from *: ctypedef struct slurm_job_credential diff --git a/pyslurm/slurm/extra.pxi b/pyslurm/slurm/extra.pxi index f48572d4..50fccb23 100644 --- a/pyslurm/slurm/extra.pxi +++ b/pyslurm/slurm/extra.pxi @@ -1,4 +1,132 @@ +# +# Structs that are not in the Slurm headers, which need to be redefined +# in order to implement certain features. +# +# For example: to communicate with the slurmctld directly in order +# to retrieve the actual batch-script as a string. 
+# + +# https://github.com/SchedMD/slurm/blob/26abe9188ea8712ba1eab4a8eb6322851f06a108/src/common/slurm_persist_conn.h#L51 +ctypedef enum persist_conn_type_t: + PERSIST_TYPE_NONE = 0 + PERSIST_TYPE_DBD + PERSIST_TYPE_FED + PERSIST_TYPE_HA_CTL + PERSIST_TYPE_HA_DBD + PERSIST_TYPE_ACCT_UPDATE + +# https://github.com/SchedMD/slurm/blob/26abe9188ea8712ba1eab4a8eb6322851f06a108/src/common/slurm_persist_conn.h#L59 +ctypedef struct persist_msg_t: + void *conn + void *data + uint32_t data_size + uint16_t msg_type + +ctypedef int (*_slurm_persist_conn_t_callback_proc) (void *arg, persist_msg_t *msg, buf_t **out_buffer, uint32_t *uid) +ctypedef void (*_slurm_persist_conn_t_callback_fini)(void *arg) + +# https://github.com/SchedMD/slurm/blob/26abe9188ea8712ba1eab4a8eb6322851f06a108/src/common/slurm_persist_conn.h#L66 +ctypedef struct slurm_persist_conn_t: + void *auth_cred + _slurm_persist_conn_t_callback_proc callback_proc + _slurm_persist_conn_t_callback_fini callback_fini + char *cluster_name + time_t comm_fail_time + uint16_t my_port + int fd + uint16_t flags + bool inited + persist_conn_type_t persist_type + uid_t r_uid + char *rem_host + uint16_t rem_port + time_t *shutdown + pthread_t thread_id + int timeout + slurm_trigger_callbacks_t trigger_callbacks; + uint16_t version + +# https://github.com/SchedMD/slurm/blob/20e2b354168aeb0f76d67f80122d80925c2ef32b/src/common/pack.h#L68 +ctypedef struct buf_t: + uint32_t magic + char *head + uint32_t size + uint32_t processed + bool mmaped + +# https://github.com/SchedMD/slurm/blob/20e2b354168aeb0f76d67f80122d80925c2ef32b/src/common/pack.h#L68 +ctypedef struct return_code_msg_t: + uint32_t return_code + +# https://github.com/SchedMD/slurm/blob/fe82218def7b57f5ecda9222e80662ebbb6415f8/src/common/slurm_protocol_defs.h#L650 +ctypedef struct job_id_msg_t: + uint32_t job_id + uint16_t show_flags + +# https://github.com/SchedMD/slurm/blob/fe82218def7b57f5ecda9222e80662ebbb6415f8/src/common/slurm_protocol_defs.h#L216 +# Only partially 
defined - not everything needed at the moment. +ctypedef enum slurm_msg_type_t: + REQUEST_SHARE_INFO = 2022 + REQUEST_BATCH_SCRIPT = 2051 + RESPONSE_BATCH_SCRIPT = 2052 + RESPONSE_SLURM_RC = 8001 + +# https://github.com/SchedMD/slurm/blob/fe82218def7b57f5ecda9222e80662ebbb6415f8/src/common/slurm_protocol_defs.h#L469 +ctypedef struct forward_t: + uint16_t cnt + uint16_t init + char *nodelist + uint32_t timeout + uint16_t tree_width + +# https://github.com/SchedMD/slurm/blob/fe82218def7b57f5ecda9222e80662ebbb6415f8/src/common/slurm_protocol_defs.h#L491 +ctypedef struct forward_struct_t: + char *buf + int buf_len + uint16_t fwd_cnt + pthread_mutex_t forward_mutex + pthread_cond_t notify + List ret_list + uint32_t timeout + +# https://github.com/SchedMD/slurm/blob/fe82218def7b57f5ecda9222e80662ebbb6415f8/src/common/slurm_protocol_defs.h#L514 +ctypedef struct slurm_msg_t: + slurm_addr_t address + void *auth_cred + int auth_index + uid_t auth_uid + bool auth_uid_set + uid_t restrict_uid + bool restrict_uid_set + uint32_t body_offset + buf_t *buffer + slurm_persist_conn_t *conn + int conn_fd + void *data + uint32_t data_size + uint16_t flags + uint8_t hash_index + uint16_t msg_type + uint16_t protocol_version + forward_t forward + forward_struct_t *forward_struct + slurm_addr_t orig_addr + List ret_list + +# https://github.com/SchedMD/slurm/blob/fe82218def7b57f5ecda9222e80662ebbb6415f8/src/common/slurm_protocol_defs.c#L865 +cdef extern void slurm_free_return_code_msg(return_code_msg_t *msg) + +# https://github.com/SchedMD/slurm/blob/2d2e83674b59410a7ed8ab6fc8d8acfcfa8beaf9/src/common/slurm_protocol_api.c#L2401 +cdef extern int slurm_send_recv_controller_msg(slurm_msg_t *request_msg, + slurm_msg_t *response_msg, + slurmdb_cluster_rec_t *working_cluster_rec) + +# https://github.com/SchedMD/slurm/blob/fe82218def7b57f5ecda9222e80662ebbb6415f8/src/common/slurm_protocol_defs.c#L168 +cdef extern void slurm_msg_t_init(slurm_msg_t *msg) + + # Global Environment + cdef extern char 
**environ # From 61d4740c4c2e812e4983ab9558d1d23060b93c7f Mon Sep 17 00:00:00 2001 From: wresch Date: Thu, 9 Feb 2023 15:04:59 -0500 Subject: [PATCH 09/48] Brought __rpc_num2string in sync with slurm 22.05 message types (#261) --- pyslurm/pyslurm.pyx | 136 ++++++++++++++++++++------------- scripts/slurm_msg_type_dict.py | 46 +++++++++++ 2 files changed, 128 insertions(+), 54 deletions(-) create mode 100755 scripts/slurm_msg_type_dict.py diff --git a/pyslurm/pyslurm.pyx b/pyslurm/pyslurm.pyx index 0dd42c2c..58e5e951 100644 --- a/pyslurm/pyslurm.pyx +++ b/pyslurm/pyslurm.pyx @@ -4663,7 +4663,10 @@ cdef class statistics: rpc_type_stats = {} for i in range(self._buf.rpc_type_size): - rpc_type = self.__rpc_num2string(self._buf.rpc_type_id[i]) + try: + rpc_type = self.__rpc_num2string(self._buf.rpc_type_id[i]) + except KeyError: + rpc_type = "UNKNOWN" rpc_type_stats[rpc_type] = {} rpc_type_stats[rpc_type]['id'] = self._buf.rpc_type_id[i] rpc_type_stats[rpc_type]['count'] = self._buf.rpc_type_cnt[i] @@ -4725,10 +4728,10 @@ cdef class statistics: 1001: "REQUEST_NODE_REGISTRATION_STATUS", 1002: "MESSAGE_NODE_REGISTRATION_STATUS", 1003: "REQUEST_RECONFIGURE", - 1004: "RESPONSE_RECONFIGURE", + 1004: "REQUEST_RECONFIGURE_WITH_CONFIG", 1005: "REQUEST_SHUTDOWN", - 1006: "REQUEST_SHUTDOWN_IMMEDIATE", - 1007: "RESPONSE_SHUTDOWN", + 1006: "DEFUNCT_RPC_1006", + 1007: "DEFUNCT_RPC_1007", 1008: "REQUEST_PING", 1009: "REQUEST_CONTROL", 1010: "REQUEST_SET_DEBUG_LEVEL", @@ -4745,9 +4748,10 @@ cdef class statistics: 1021: "REQUEST_LICENSE_INFO", 1022: "RESPONSE_LICENSE_INFO", 1023: "REQUEST_SET_FS_DAMPENING_FACTOR", + 1024: "RESPONSE_NODE_REGISTRATION", - 1400: "DBD_MESSAGES_START", 1433: "PERSIST_RC", + 2000: "DBD_MESSAGES_END", 2001: "REQUEST_BUILD_INFO", @@ -4760,12 +4764,12 @@ cdef class statistics: 2008: "RESPONSE_NODE_INFO", 2009: "REQUEST_PARTITION_INFO", 2010: "RESPONSE_PARTITION_INFO", - 2011: "REQUEST_ACCTING_INFO", - 2012: "RESPONSE_ACCOUNTING_INFO", + 2011: "DEFUNCT_RPC_2011", 
+ 2012: "DEFUNCT_RPC_2012", 2013: "REQUEST_JOB_ID", 2014: "RESPONSE_JOB_ID", - 2015: "REQUEST_BLOCK_INFO", - 2016: "RESPONSE_BLOCK_INFO", + 2015: "REQUEST_CONFIG", + 2016: "RESPONSE_CONFIG", 2017: "REQUEST_TRIGGER_SET", 2018: "REQUEST_TRIGGER_GET", 2019: "REQUEST_TRIGGER_CLEAR", @@ -4782,22 +4786,22 @@ cdef class statistics: 2030: "REQUEST_TRIGGER_PULL", 2031: "REQUEST_FRONT_END_INFO", 2032: "RESPONSE_FRONT_END_INFO", - 2033: "REQUEST_SPANK_ENVIRONMENT", - 2034: "RESPONCE_SPANK_ENVIRONMENT", + 2033: "DEFUNCT_RPC_2033", + 2034: "DEFUNCT_RPC_2034", 2035: "REQUEST_STATS_INFO", 2036: "RESPONSE_STATS_INFO", 2037: "REQUEST_BURST_BUFFER_INFO", 2038: "RESPONSE_BURST_BUFFER_INFO", 2039: "REQUEST_JOB_USER_INFO", 2040: "REQUEST_NODE_INFO_SINGLE", - 2041: "REQUEST_POWERCAP_INFO", - 2042: "RESPONSE_POWERCAP_INFO", + 2041: "DEFUNCT_RPC_2041", + 2042: "DEFUNCT_RPC_2042", 2043: "REQUEST_ASSOC_MGR_INFO", 2044: "RESPONSE_ASSOC_MGR_INFO", - 2045: "REQUEST_SICP_INFO_DEFUNCT", - 2046: "RESPONSE_SICP_INFO_DEFUNCT", - 2047: "REQUEST_LAYOUT_INFO", - 2048: "RESPONSE_LAYOUT_INFO", + 2045: "DEFUNCT_RPC_2045", + 2046: "DEFUNCT_RPC_2046", + 2047: "DEFUNCT_RPC_2047", + 2048: "DEFUNCT_RPC_2048", 2049: "REQUEST_FED_INFO", 2050: "RESPONSE_FED_INFO", 2051: "REQUEST_BATCH_SCRIPT", @@ -4807,6 +4811,11 @@ cdef class statistics: 2055: "REQUEST_BURST_BUFFER_STATUS", 2056: "RESPONSE_BURST_BUFFER_STATUS", + 2200: "REQUEST_CRONTAB", + 2201: "RESPONSE_CRONTAB", + 2202: "REQUEST_UPDATE_CRONTAB", + 2203: "RESPONSE_UPDATE_CRONTAB", + 3001: "REQUEST_UPDATE_JOB", 3002: "REQUEST_UPDATE_NODE", 3003: "REQUEST_CREATE_PARTITION", @@ -4816,10 +4825,12 @@ cdef class statistics: 3007: "RESPONSE_CREATE_RESERVATION", 3008: "REQUEST_DELETE_RESERVATION", 3009: "REQUEST_UPDATE_RESERVATION", - 3010: "REQUEST_UPDATE_BLOCK", + 3010: "DEFUNCT_RPC_3010", 3011: "REQUEST_UPDATE_FRONT_END", - 3012: "REQUEST_UPDATE_LAYOUT", - 3013: "REQUEST_UPDATE_POWERCAP", + 3012: "DEFUNCT_RPC_3012", + 3013: "DEFUNCT_RPC_3013", + 3014: 
"REQUEST_DELETE_NODE", + 3015: "REQUEST_CREATE_NODE", 4001: "REQUEST_RESOURCE_ALLOCATION", 4002: "RESPONSE_RESOURCE_ALLOCATION", @@ -4827,50 +4838,52 @@ cdef class statistics: 4004: "RESPONSE_SUBMIT_BATCH_JOB", 4005: "REQUEST_BATCH_JOB_LAUNCH", 4006: "REQUEST_CANCEL_JOB", - 4007: "RESPONSE_CANCEL_JOB", - 4008: "REQUEST_JOB_RESOURCE", - 4009: "RESPONSE_JOB_RESOURCE", - 4010: "REQUEST_JOB_ATTACH", - 4011: "RESPONSE_JOB_ATTACH", + 4007: "DEFUNCT_RPC_4007", + 4008: "DEFUNCT_RPC_4008", + 4009: "DEFUNCT_RPC_4009", + 4010: "DEFUNCT_RPC_4010", + 4011: "DEFUNCT_RPC_4011", 4012: "REQUEST_JOB_WILL_RUN", 4013: "RESPONSE_JOB_WILL_RUN", 4014: "REQUEST_JOB_ALLOCATION_INFO", 4015: "RESPONSE_JOB_ALLOCATION_INFO", - 4016: "DEFUNCT_REQUEST_JOB_ALLOCATION_INFO_LITE", - 4017: "DEFUNCT_RESPONSE_JOB_ALLOCATION_INFO_LITE", - 4018: "REQUEST_UPDATE_JOB_TIME", + 4016: "DEFUNCT_RPC_4017", + 4017: "DEFUNCT_RPC_4018", + 4018: "DEFUNCT_RPC_4019", 4019: "REQUEST_JOB_READY", 4020: "RESPONSE_JOB_READY", 4021: "REQUEST_JOB_END_TIME", 4022: "REQUEST_JOB_NOTIFY", 4023: "REQUEST_JOB_SBCAST_CRED", 4024: "RESPONSE_JOB_SBCAST_CRED", - 4025: "REQUEST_JOB_PACK_ALLOCATION", - 4026: "RESPONSE_JOB_PACK_ALLOCATION", - 4027: "REQUEST_JOB_PACK_ALLOC_INFO", - 4028: "REQUEST_SUBMIT_BATCH_JOB_PACK", + 4025: "REQUEST_HET_JOB_ALLOCATION", + 4026: "RESPONSE_HET_JOB_ALLOCATION", + 4027: "REQUEST_HET_JOB_ALLOC_INFO", + 4028: "REQUEST_SUBMIT_BATCH_HET_JOB", 4500: "REQUEST_CTLD_MULT_MSG", 4501: "RESPONSE_CTLD_MULT_MSG", 4502: "REQUEST_SIB_MSG", 4503: "REQUEST_SIB_JOB_LOCK", 4504: "REQUEST_SIB_JOB_UNLOCK", + 4505: "REQUEST_SEND_DEP", + 4506: "REQUEST_UPDATE_ORIGIN_DEP", 5001: "REQUEST_JOB_STEP_CREATE", 5002: "RESPONSE_JOB_STEP_CREATE", - 5003: "REQUEST_RUN_JOB_STEP", - 5004: "RESPONSE_RUN_JOB_STEP", + 5003: "DEFUNCT_RPC_5003", + 5004: "DEFUNCT_RPC_5004", 5005: "REQUEST_CANCEL_JOB_STEP", - 5006: "RESPONSE_CANCEL_JOB_STEP", + 5006: "DEFUNCT_RPC_5006", 5007: "REQUEST_UPDATE_JOB_STEP", - 5008: 
"DEFUNCT_RESPONSE_COMPLETE_JOB_STEP", - 5009: "REQUEST_CHECKPOINT", - 5010: "RESPONSE_CHECKPOINT", - 5011: "REQUEST_CHECKPOINT_COMP", - 5012: "REQUEST_CHECKPOINT_TASK_COMP", - 5013: "RESPONSE_CHECKPOINT_COMP", + 5008: "DEFUNCT_RPC_5008", + 5009: "DEFUNCT_RPC_5009", + 5010: "DEFUNCT_RPC_5010", + 5011: "DEFUNCT_RPC_5011", + 5012: "DEFUNCT_RPC_5012", + 5013: "DEFUNCT_RPC_5013", 5014: "REQUEST_SUSPEND", - 5015: "RESPONSE_SUSPEND", + 5015: "DEFUNCT_RPC_5015", 5016: "REQUEST_STEP_COMPLETE", 5017: "REQUEST_COMPLETE_JOB_ALLOCATION", 5018: "REQUEST_COMPLETE_BATCH_SCRIPT", @@ -4881,51 +4894,60 @@ cdef class statistics: 5023: "REQUEST_JOB_REQUEUE", 5024: "REQUEST_DAEMON_STATUS", 5025: "RESPONSE_SLURMD_STATUS", - 5026: "RESPONSE_SLURMCTLD_STATUS", + 5026: "DEFUNCT_RPC_5026", 5027: "REQUEST_JOB_STEP_PIDS", 5028: "RESPONSE_JOB_STEP_PIDS", 5029: "REQUEST_FORWARD_DATA", - 5030: "REQUEST_COMPLETE_BATCH_JOB", + 5030: "DEFUNCT_RPC_5030", 5031: "REQUEST_SUSPEND_INT", 5032: "REQUEST_KILL_JOB", - 5033: "REQUEST_KILL_JOBSTEP", + 5033: "DEFUNCT_RPC_5033", 5034: "RESPONSE_JOB_ARRAY_ERRORS", 5035: "REQUEST_NETWORK_CALLERID", 5036: "RESPONSE_NETWORK_CALLERID", - 5037: "REQUEST_STEP_COMPLETE_AGGR", + 5037: "DEFUNCT_RPC_5037", 5038: "REQUEST_TOP_JOB", + 5039: "REQUEST_AUTH_TOKEN", + 5040: "RESPONSE_AUTH_TOKEN", 6001: "REQUEST_LAUNCH_TASKS", 6002: "RESPONSE_LAUNCH_TASKS", 6003: "MESSAGE_TASK_EXIT", 6004: "REQUEST_SIGNAL_TASKS", - 6005: "REQUEST_CHECKPOINT_TASKS", + 6005: "DEFUNCT_RPC_6005", 6006: "REQUEST_TERMINATE_TASKS", 6007: "REQUEST_REATTACH_TASKS", 6008: "RESPONSE_REATTACH_TASKS", 6009: "REQUEST_KILL_TIMELIMIT", - 6010: "DEFUNCT_REQUEST_SIGNAL_JOB", + 6010: "DEFUNCT_RPC_6010", 6011: "REQUEST_TERMINATE_JOB", 6012: "MESSAGE_EPILOG_COMPLETE", 6013: "REQUEST_ABORT_JOB", + 6014: "REQUEST_FILE_BCAST", - 6015: "TASK_USER_MANAGED_IO_STREAM", + 6015: "DEFUNCT_RPC_6015", 6016: "REQUEST_KILL_PREEMPTED", + 6017: "REQUEST_LAUNCH_PROLOG", 6018: "REQUEST_COMPLETE_PROLOG", 6019: 
"RESPONSE_PROLOG_EXECUTING", + 6500: "REQUEST_PERSIST_INIT", + 7001: "SRUN_PING", 7002: "SRUN_TIMEOUT", 7003: "SRUN_NODE_FAIL", 7004: "SRUN_JOB_COMPLETE", 7005: "SRUN_USER_MSG", - 7006: "SRUN_EXEC", + 7006: "DEFUNCT_RPC_7006", 7007: "SRUN_STEP_MISSING", 7008: "SRUN_REQUEST_SUSPEND", + 7009: "SRUN_STEP_SIGNAL", + + 7010: "SRUN_NET_FORWARD", 7201: "PMI_KVS_PUT_REQ", - 7202: "PMI_KVS_PUT_RESP", + 7202: "DEFUNCT_RPC_7202", 7203: "PMI_KVS_GET_REQ", 7204: "PMI_KVS_GET_RESP", @@ -4941,9 +4963,15 @@ cdef class statistics: 10004: "ACCOUNTING_TRES_CHANGE_DB", 10005: "ACCOUNTING_NODES_CHANGE_DB", - 11001: "MESSAGE_COMPOSITE", - 11002: "RESPONSE_MESSAGE_COMPOSITE"} - + 11001: "SLURMSCRIPTD_REQUEST_FLUSH", + 11002: "SLURMSCRIPTD_REQUEST_FLUSH_JOB", + 11003: "SLURMSCRIPTD_REQUEST_RECONFIG", + 11004: "SLURMSCRIPTD_REQUEST_RUN_SCRIPT", + 11005: "SLURMSCRIPTD_REQUEST_SCRIPT_COMPLETE", + 11006: "SLURMSCRIPTD_REQUEST_UPDATE_DEBUG_FLAGS", + 11007: "SLURMSCRIPTD_REQUEST_UPDATE_LOG", + 11008: "SLURMSCRIPTD_SHUTDOWN", + } return num2string[opcode] diff --git a/scripts/slurm_msg_type_dict.py b/scripts/slurm_msg_type_dict.py new file mode 100755 index 00000000..dbd2d0dc --- /dev/null +++ b/scripts/slurm_msg_type_dict.py @@ -0,0 +1,46 @@ +#! /usr/bin/env python3 +""" +Parse $slurmrepo/src/common/slurm_protocol_defs.h and create +a small C program that generates a mapping of the numeric +slurm msg types to their symbolic names. 
+ +Example: + ./slurm_msg_type_dict.py $slurmrepo/src/common/slurm_protocol_defs.h > msgdict.c + gcc -o msgdict msgdict.c + ./msgdict +""" + +import re +import sys +import argparse + +def generate_c(header_file_name): + typedef_re = re.compile(r"\s*typedef\s+enum\s*{(.*?)}\s*slurm_msg_type_t\s*;", re.DOTALL) + symbol_re = re.compile(r"^\s*([A-Z0-9_]+)\s*[,=\n]") + + with open(header_file_name, mode="r", encoding="utf-8") as header_file: + header = header_file.read() + typedef = typedef_re.search(header) + if typedef is None: + print("could not identify the slurm_msg_type_t typedef in the header file") + sys.exit(1) + + print("""#include """) + print(typedef.group(0)) + print("""\n\nint main(void) {""") + for line in typedef.group(1).split("\n"): + symbol = symbol_re.match(line) + if symbol is not None: + print(f""" printf("%d: \\\"%s\\\",\\n", {symbol.group(1)}, "{symbol.group(1)}");""") + else: + print(f""" printf("\\n");""") + print(""" return 0;\n}""") + +def main(): + parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument("header", help="$slurmrepo/src/common/slurm_protocol_defs.h") + args = parser.parse_args() + generate_c(args.header) + +if __name__ == "__main__": + main() From dd73ddf65c79084f0b343de5d51fbf3d28246f35 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Sun, 26 Feb 2023 20:50:31 +0100 Subject: [PATCH 10/48] bump pyslurm version (#263) --- pyslurm/__version__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyslurm/__version__.py b/pyslurm/__version__.py index 81a9fc37..416525d0 100644 --- a/pyslurm/__version__.py +++ b/pyslurm/__version__.py @@ -1 +1 @@ -__version__ = "22.5.0" +__version__ = "22.5.1" diff --git a/setup.py b/setup.py index 5fd61c9a..c36f279e 100644 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ CYTHON_VERSION_MIN = "0.29.30" SLURM_RELEASE = "22.5" -PYSLURM_PATCH_RELEASE = "0" 
+PYSLURM_PATCH_RELEASE = "1" SLURM_SHARED_LIB = "libslurm.so" CURRENT_DIR = pathlib.Path(__file__).parent From f3dbb7382c619014120758ba99454f65c87c6a99 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Fri, 10 Mar 2023 09:21:56 +0100 Subject: [PATCH 11/48] Transition from sphinx to mkdocs for documentation (#271) * Transition from sphinx to mkdocs for documentation * Tidy up docstrings, mkdocs config * Add logo to docs, hide navbar * Reformat docstring indentation * Missing newline * Fix more wrong indents * Add `mike` for versioning the docs * add site dir to gitignore * setup.py: Remove .so files on cleanup * header.pxi: Rename all "cpdef enum" to "cdef enum" cpdef didn't work correctly with Cython 3.0 onwards, which is needed for the docs. Setting this to "cdef enum" doesn't change any functionality in our case. * Remove embedsignature=True For building docs, we use Cython 3.0 now, which automatically provides full introspection functionality for cython functions * Build docs with Cython 3.0 --------- Co-authored-by: Michael Milton Co-authored-by: Michael Milton --- .gitignore | 1 + doc/Makefile | 130 --- doc/doctrees/environment.pickle | Bin 987435 -> 0 bytes doc/doctrees/index.doctree | Bin 7785 -> 0 bytes doc/source/conf.py | 233 ------ doc/source/index.rst | 118 --- doc_requirements.txt | 6 + docs/index.md | 52 ++ {doc/source/_static => docs}/pyslurm-docs.png | Bin docs/stylesheets/extra.css | 4 + mkdocs.yml | 19 + pyslurm/pyslurm.pyx | 789 +++++++++++------- pyslurm/slurm/header.pxi | 50 +- scripts/builddocs.sh | 25 +- setup.cfg | 10 - setup.py | 2 +- 16 files changed, 606 insertions(+), 833 deletions(-) delete mode 100644 doc/Makefile delete mode 100644 doc/doctrees/environment.pickle delete mode 100644 doc/doctrees/index.doctree delete mode 100644 doc/source/conf.py delete mode 100644 doc/source/index.rst create mode 100644 doc_requirements.txt create mode 100644 docs/index.md rename {doc/source/_static => 
docs}/pyslurm-docs.png (100%) create mode 100644 docs/stylesheets/extra.css create mode 100644 mkdocs.yml diff --git a/.gitignore b/.gitignore index 6bea6e8b..f79b3369 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ # Ignore Build Directory # build/ +site # Ignore Docs Directory # #doc/ diff --git a/doc/Makefile b/doc/Makefile deleted file mode 100644 index eb9f6570..00000000 --- a/doc/Makefile +++ /dev/null @@ -1,130 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = build - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source - -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - -rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - 
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PySLURM.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PySLURM.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/PySLURM" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PySLURM" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." 
- -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - make -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." 
diff --git a/doc/doctrees/environment.pickle b/doc/doctrees/environment.pickle deleted file mode 100644 index 3b32942144717aa7f05de67a955122ba5a36cdd2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 987435 zcmb@Pbzoe_)y31M!7_!ZO(BISiGxkavMk$5ELjyeZrXIaUah1}R=e8Sm2IVU+q6xY znVFfH+po;b%*+hmJ?GBc2m798<@S%n+Lhk<4c@tP2i^m&d0MTzyx3^%8>+YVG+ev2 zRB!c;+_chY&W)zN=v<@Sx#s@P!Kck0HrJeQH0#~YwZeldy+-rME%IHfx>TQSH9DIc zwce4PLvC|b=g?ZEvRkxzjaH>{WVbF~A9mT*Q=P+i&mP}s_3BR96+AgtX|(nTyzN#y zN9>uk{?2u04{5aK>ias^tIf4*NJMw2 z)t;+&kL2GR*}1+X;0F6UH#}|j2v^p}4gJzGc_Ij?dd!;8rH{Rd5$!W8P z&#f#Y4c*R7FPlBA)~t5B^7B!b%^tqHTD!v4=ewPwFPlBMG1u)JbJ^@6`lrWUwlaHo zZBa_f)mxq8F6$hhPsmJtpXB(2T{l1Ipo1n?r?<~ejbC^Q|2bt0xu37r>ZkPDr!24b z7Tc}PiC16MIcb0A!H0UFz%pSU26@0IAv&9>%?5@rp)~hZcd7Ycj9>Q|yoVv2R zJA33@d#NhLRqa(fx7gpgx9R767(aN5NvoL0NltnRKiyPezY z@7z`jXQ@4>C4IZuYcH>!9zM<<^pM%Z7wRqj-R&1|k1r1G)~l|z*f~A?Bwwz~9_j^$ z_IJ)WZ80ws8d9@et2XOLR%Tqa)ooULlB7x3-nV*W=gi!nEUhlodp(x!S+my}Z_lkX z>r$j>4I3VERp;!*vlcg8-8ttWyC1s9bS<8_c-HJ8`UmIAADkzDaPHNe^YI6#FP=Vo z5dYxX(@gs*lhuWK=K}e&JIJ41aCPTG`Lh+tx@#}AmyxP&=Z^b3cbYw{z5DWdt=H|` zd4K0FE3?vw44K)c?ih-OiT% zovr+fZTj88uG{O3?eAQ~FE93A)}%LV-`^SMmlOPQW%jxY&Gzm}uU+f8y6Em$WapZk zy{@afdUdW+ZSJkEb~|^I?>keA*Q-j;n?_UM-_JE%XJ+;~^63o!O}8^UdlTOl8m)y& zt=*byAitfR51&1v*66WHx}8h*3;#B=*XybpbQg@)R0;f&hp)#b(+D=Ij&*js9zI=Nb?KYNYS zd!2jlx;_!x8{J-I>*!2ndhF8CPW80Kdo5OWofpG2<*C5PC{TO%uIo`77O^s2?~V29 zOP!iZpBs~&lZS$IT7b%PKGh@brR8RQUu9&mUb~`GSLr*%q-*+AP|t`^ety?YneeHV zR)wajDpj_pzP;UE?kp%|V+T4XJ{bUfELhonvff zbiC71=tvCO^^b>q7R!<7d?R2{Xk+dwCE5u3N|KLLwYTer%*V{?a=o&x+TBvG_Ewyv zN~JG|WuPa|g~aDHsK3WIvf!H+f_D0O3h(pXdvs5=3Bw^nqBt+B zsvI>+jaI{?GYZtc(zo6%&30AN+>}9Yi;N$zUY+|W;m)zN&*RyU`T`+>?z`(oEdA+b zLz>W5F@G|i)MTLAuapa8lrA0&l)MTN_w(g6vm#cI2w2$rtA4bb_wZEE&qz@G0lTiv z^wX|Sw_UGul?rc&CA@D`z|+)xpf8ikmbtNR%CdGIq|(tQ*v9h$g?kdB(yX~g9+MS0gFm*V_gJ1)Z%(opZ%~ZYG6_4++%DXtm>o3N&WXXTru4B`D89+xCt8S^f?47>H4_JdG zO*1D)uV=SwsJROjsMc9*e9mo6?_&Zyg)rD^7$qF2e0S9oP 
zC`js4b{$D7jnzbbZ|A8>IV(o#vsLmi6`$q@H}s1gwc3hv^|{W|RW;fU``blYhWgL& z=2aSCb`3i~7fuv92=}=aMV#K|` zH=dEY%;wN|UN~S))Z~F6A0DRSi~Kl;DyWE;Ioqv|v{xif@nThuPEinK3d(sA5?R+@engNN8Ois-4RSj)f0WW>P9_LjI<%m$>UUgg>RX`0;%IYzG-B- z^Ga2}ORQy<*q=m+pjYj>F6{uvklAI4_|!YERzlPx?HfGwdFs9=tbT@6@eOIybKUgw>vAFbss2kM}m(V+UfazUi* zoaWt1iCW8&{*@6U?mfQIjj@Rc^vZq+;Q zAFwWp2VQAWP@(zR3OhHg$gB zI{&BYn_}v)oV8FSWokrzI7})-Xkv4{iJ`Ic5k*C(uBAZ z8+#ga_0GqX6m=yEhnb8NSs(X)mS$T^uJZ|HM5p=%1*Zy>{$y^{fmPYTpmpa{3OLYF zCs88k)4qTE@|tOHs!BP3Mj6rmU06`6LG{o2Z1_Oj@Z6ltoX4v**Y12y5z$&NjOe9= z$oaf)&YNY>oM|jc*h|U_ABJ0V_}uw|(xRR_M%|t~5L$GZAgN#UEjqXcBW>wH4OwLP zlG376%QC~|bqM+Lt`phmC#$Y2v-0T%!p8i7!9`uz%JDFgm^zM4J*7BEIhX5WSvY5x$}FQFpMU>KX(Qrd)i^#|^FW>gXRhgX_8`-gWv@)|}c5-rbs`DLXt+zeobqM)xHVB|jq>MKb z1Q1Wh2LZmPzyokMd|NJ*CI9>0ogA;r(#{;`96uPax|aAz=vs*Yb${qh#YlCzD~oM# zrO=;bsj%}S)sI>{iI3sY0O(gD=*PQ`kbY8`T&>h9-Ib-zPgH$?IRY1KRNWq@Tme^vu-Q#MaI~6cr7Rb&F;+CPT(QebdW@5qHR3;9p9Kwu$^e-7iJb zzx{%cI-pba1z8EII}Qe&|0pWzUlsVF86kk&LDx9wARqi47e8cC*0vb6$JrY!1tleDi)*Pxwhjw>u!$oba?Eb2`#UJpoZfMIl>Zd_S+*Edvjl2kIjLxzYO0HG!5 zXJHfd1sUGN2;9&pQCGWgDi(yuxe++pK8hWGcVojv`*7Yu6)Tc^6L4>wZkHp{nzc!} zn;J4Y>=!o4M2f7Vz|y|#%TlNQ?r7sg>#MLNRfddXz|dA`wnT>G&Gq@}O0(B>#~Nxl z*2_w5Q%aA_bR=FV2HwRkxCg`pOjAf@9DLMx!*!Lg`l)eR|bxWQkqVASm zUH;GQMq-lp_1rBD6Scm zo##Fvch=yv(tvLeBW**HyRg=Kq2}HX&OW9-y|TL}`{a7=js_c!RaW6Bozo-rPM~57^W>_}+2)3GcQ#Tq%2=2! z(V+TWpssDf+B)`Jy1N=6>bjM-a|n^L5ft4fQrR^&F)}r}#SI%ES}`&f*RY14+ENf? 
zD8C8ve#>82ossr|2$UN!Omr$)nh&o<)@HDD^xd>FKQAF|1XLt$Ima2LR*@SuYBXfO zn<`-tDc2-@3+Os#S#|Mrf!k`t=v=HcpHw1eTf*^;PJAeq=a_M#!&YI#^E#wllu+Qe zh#}h9Zp=;goV(a4(G4vHmdLA+u^kL;8)0Y+d+pu0QKIf($+#G#h?+=H<}1pipEJQ7 zhKdfsYj~bsjkHP7)Gl~yB#TAvZpMk)g+dD?8dRTxx^C$34FEG0H*NCK5u&j6AwvBb z=xamHG%zP-DFkBvn+B%VZ6_$l(RSK0%I4u2T}GK69oaFl*%lzDU$95l5S4*?Ih)jx{AS~#=WpPrnE@AH)#3kx2)j0 zs)3?|RbjW1>9@#`u^Wuk>6~C#Yleu{P2aj<+NjP!Unf%I^(8sfxU*5;%Rx#D(A5pJ zG1lU;V^g6V^E97g`i+A=Njfif3&tFdF-yWt89hQ50abUbZ<5EoGp=FOsL@ULtztDY zF9-8@$%37Y(fPhDV2+9nzQVB47480jh99K*Wyx=XuiYVxX354gw`Abx%&TNPOT>t4 z0jE}kr%>SLth%;g;uHS-ITk8Vdl}l=*RUGIlfPpvS$*o5d~}RS*IGt|1P6jz;80k) zmLmJ6&)z`uD<_#;*LYiFZL_4*3RIbZUXsO-02Z)YF=TX%D?w6+kUa^+wvONjx7Q$1 z4?aJzWrRrC2Z~01EY9IpjS$_|SQICr@RbnOUYzo1x$8;R&t;j^-Nz`=sO}n`ZAy{c z`+}Q``%5zvSKsY7V6+>p5%*UyvhJ6#hTTGCwzVvWyBR=p_cvD5(XBe$P@PjD7O08d+IMecA zq(u#f1|j|Q%w28dXjv8nBE1R`4*^0a4V;)@iWjCP4>d}(@AgeiQaa>33>@`>%|K|Q zf4EViowv+FP>7gE0HaM~1Xm1btkXTx7*UTSvvV}1LHS2PKI5a&I_Xr_J=!QyA1&){ zcz-XYMcQLPOZlEsZ*o|F9&5OdHB{6~OOG7%I};gF9tX;?Z18?|3%Cckb~< zi$>TBeZssTxlc%P=eZVKdZO{7O`?z&gvfakIGOpEUubntHcYgA7y8s*hm@y)qFq&- zDw*F;cV*SE(sNHWNVMrS@~abGgMg<2pw-mB$AFs)-O~*fonYlBBqfq0KLccSjU+DO ze3yEr!J^i-Ay0*o(GDyIJjz~`|7nx33W8~x#t@sYPZwfHmgF$3&8M^dv0{$yvNT$UTB!; z%(QSVF1vBcy$CFA+c=9Q_CrSHJu(A+v2oVrzh*?3o0ou)@n6-^v=LWaSl4qeHD2^k zZRvnpB+N9t3~9j7Av*@WLB^Wj%Z(YeMfsf{86h%W0fzdPJdVQ+3a>Q6hL{`Czj-BW zO?VZQwT^6mTea7ocds_}Xj@oge;cHjgV!WEU{~gXjQNz8}kWoWh=;@`!NPHWJ+8hy%q;bLz z?(Ig3cBlMQEh9wAJ3!IVBeiXDVs?CVYHY;4(@@b&jj(myb2NTNpci6Mau{>*%sG8{IK^LCz_*L@eb=mjJWp!r{f_f!t_9OXTbZ76}21r zipoim^nrwAlV3mU{ovrF(pj%ck@O*uboA9p6Q-xQ<-+}+fuciv!K^9B5b|L_)N-hK z(kpyg$;jzGVzj8!TsR2RIY|kU`%!RDrjgVpxNCenHq=ek=cnq+I6%cAv+F))^k}H3 zph=dfG6^3?60(80;jV4$`-E|$O*Gf(5~Cl)i2EdP+C-&rudKJk{qQd6r;HWtcHLyc zpHm^^(}0}7V)Z6j|IzKA6YWj1jr|Ju8RJFQPZ~vrxj>Wj&w{RVaBZ=vZumK4M(t%m zn@xlW`aD1br-Y!(Ev6=2y_C>T@F!kR|DBc$q^EWJaI=XKSPAQx;dmU1~ z4N92(&B^grv8DAYw;0H1_Z=feN1%eLNlOv-UBI;2sqYeG zWWLbGx)O?DM9KZYDAC*$mRll4)(^q*yPfrX7h_fKM}~>o#KJ9NR3YKVAb8&(S;fUv 
z)cwRD(SbDW6sH!0Aw$YfLD7I+YMZ?KnGvF9FF)&&<6>~&)pV2bb1>Ap`+C9ReAt2} zU&(Z~>waO#sC6%_paM5;n05gSB51S{s>Drh?oURFb^(w=q2^>r`Ex??n?6I+bbm2Qv>O!m<{(7QU%}Bh zo;ZpAf(Z9FLqtc7{+LRlL&D!d$hwAFlbFW4e;DaNUBg6-xPJn79Nii1PZjA8>W<5N zpeq+g^xVG;8eLLb;-op$RGA{l{|5P{B#(HL0?$7zoczb2(HVR#YfB2$2t4STR6Q6LUuxC2EG& za%4q{s3U>W=x8cp%~k!1j;Y&Zw}QL2QKRAibl%pGB>g%`M$z3lrI?n>Sg~dMx`vE4 z^kUtH|xUSF9{cKHJXS&~VW< z2@{{YTP3AL)Qy1h1D7`$GTRCksDa-g;uU1BUtZlZJ20z zNt8IaFJjeY5gG!NJ`K{|VWFYBxYgd!4~`mDi`mcY2nztG8!1{rxmaYs7;!_usS&UswuF=9#%p(mv7$qM zX%JhbNIDZFtuLPE#MEPZqgB_P67DRcZHo0F39sc&cNPeexdBXVe%|>-%<61IMfWch zI?y3*bq>@uM6_b6x^O>B-KS1vweJWu@7ito9b&`38(u zSlU}EkRt5@&`y!cuGx^MxXfq`jA|<+xx@lGJ)pD-7s$QEe z?A+1#n_~Rk^1K|@udW8GQY1*|odC@&;8%3A<`b-!)wnwwJGv@SQp*LZOu$``0Bv60 zACa(VyBY#5cQtU-6coB5X)*FPg6Bu-aNUKpGI)`0*kDm>y4FEPXpyxk$(s)06^D$ywtEg(q3M5qeh7iLw&>iN|~8!O-??Aj1zNTm%B9nX=Ktm!;fYF53p& z#fFHEap@|_>yWY?6m8jrHtULFclLA39C&1K5qP`PArw==f z6&(w@{qGf^2IVIcd89xJO|BEEyStfu)Wa>#iWeed3Jjf~fPs~*eKoX!OB$`Y_TH|1 z>81@BHAlt3B3067K-WskPEvHI-mHN)#wu;?DGKFC+zFx@B=cUzrf0@xwvW0?3>O{i za%ML#Mbs{!a5dZHDt5@jOSroc&WL3ww?97k2bU(Ia{7j%_b}0@$ywt0pU_pk026yp z=w?PH9YUo3^&&nUl`k{afp0A<5@s6ig*2$&=*L58^IWZW72`&2Uao3#N(9{-AYB#m zduln2bXB87=lz8nYNNk&$jExU(cXb0r zhr3*fS!14&D<3IF+&pmFb5iHN)Z#4|ELuI;HPyTlNs9@|<_~`0at$Lzorcm0gx4bL za%1p%`q(ZF~SLCN8YKW_I_Zl@? 
zqJ0bUUW5Akpsytw8j&T&tr{fS-%1*!X%TiMVCrvVs}FZ`_^Yk%W6Wqkbgf!U6iL1> z$fYBkxF|Zp?Kk-UW`qmEOvC+<2DLV!sBvL{oE9JE%l_Q`jUMffCB>c5BlH1)>Nvsu zw%+~IB@uU(5u+9-J;Co+BI|((D`e2Z7w5YN8Ed^`Qok5+4+c&xDI$3c17l_(2THCs zR&*@S#RF3+gghjHz(tmsg{=0i;C6ZUP=iD>RO}h2#K?LWSh^hw4l!cr?%@WB_VXey z7#h@m1hj8OYgXp!r(f~l9%KPv&#eq+~Z9>>e;0yvo)#+o;?qD>pVTiFVwpyL(x)3NYS^-y3-U@NJp&{^)vtH_nU1~gnFfhg zI*xS6`4BteC9Z9dA>mmds4b&$ov!YwdmH`ibx>p#Kg{oPGNe2Q6m7n^K{W~YJl7b} zaL;aeFfcMABs>oUtrKr%SbuJ1x#ym5kZAb2)TEdiX)j38@PO3F!QsbN*>#XU1lqgX9 zWzg1&N=^BAE4F*N5u%QCE+=^@qFw=1w%jnv`Z9E{G+MM-E;0p2U4H2!Q zHC8BWt@sT9!A(rgB{;gO5o8x6KG*5q<;p7G3lAre*D7wIqHk7peDIIb?5b`1(TXMYD zeb5k5r!0*~Nl^VmP*=yFZ!eT-oclkMj{0Q%FNqtSq3(yFtIcWVQrxEF{k@NvcC=*r zx>-txjE{oheIqFu44Rm?e9REhQL3A~N*D#>6)663h-(Zc+^df(gS6#xs>a1S_X(p# z?^r6iuD?i@N%$m^pdH4?EJoUOCuhj^xlb82+L8-H6+wucPlKa@7;$p4@56SVF;cX` z8rl6VeXoskp9M%qM=Xw2M%Q>2Su9&YILUrt>p4-_XR_&tHY!u zsQyK$Yq!_yN0O5t?n{P<)(TIZ1ZOrcM9`NLP%xLcoC-I6x~~{0>P%$g-gzaGz6z4o zzd0=k=iGgCc*=dvP|>+lzQgv3k@j`aQej1@BAJgYxo;RKIy&}+6}<-aznSDFOq4N@ z>F$be86@hv6i=2-imY#grA^J3EIp*}7$xe2Wm|Jfg!Ag|nTKQO@h#{hYrx<7=j*0j&qB;Km&eq`EFm!hzEs6xVzK~M`B_8E?K zCFIbUb3ZXiw2Rb=_ZfO6f_@5+-?A=V)8wkWv#YhOE&CaNW}Ikd(&I2zyb~2Nex78; zH(YeW>9I>k-7gFhukNBfBteLrUnY6s=299k>=kvtGE6iteS1qyg^XW=p$%8|Wzq+f zFt>O{zcEC#m!}5jj0OR}O#ld3+WV>9?+g$vs(i35BSgyYK{-zRNZU3BhqWaKReO9< zid+%t{$Rl9zW$O)WL}c!KLV{SE`$WcwZyLeKN&FUESHBDREwlPgLDEj$xSr6FBLz^ z4yGMaNU~pfultKpql+WW{I;AzMY8`2HYR_QtDMJ5ufSlS?ce>)n9)AmH<`^Sk@a`5 z)L8ibCD%Zy6Za1TMH@}GsDA|^a{dX94yXR;F5E?3ad!VQQgi^yj@*4xWc?c~Z6*FT zgr>qkbi_jsi>js^2k#31(|$*&S`1_!PD1;t#Jw$}ym5#e{Q%61tP!>HAjQ zvBrs3Ov$;yAVt)1KxyA)yGsQXjyF=&c=t89M23_TK=E-se4QN-OUU;DNii+ddnA{ zamHM`)*V`==RVYS3un|$uP!&vC|&OP(cBtO^lx z3t-fd=Di7e@tH(`ZfTV095dayG9o103IuJ2!N+Jco4f$~*2akDq}W;ULgbtV&dIF6 z{&(D|AEvu+Z)4QxPMG}AyQVNxaa*J!d2jKuc(%7QUepw3uCbJmu)O(L6exUq2y1(g z{r^kL5{dNA<>{s$HG}#5WR=Jn0>?+Uw#Z&-y_3M5VUTE5i3ht<>&fenx*`4~`#pjX z8E1lV0;?)Ge7ZoW_x*{9$93gr88Uh|PtF%B7bSiJ@JA6J0wQ|5jW5u%4ICYg3*!ee 
zHNagtsz%~DAnG11?*gQ+%)?iFmEE}pj!yOZ%rf5L+?8|L^*P7|Nz%^)T^$gZwh6rV z7}pz)?yJ?8@!~&szJa5cpA_4(eoc}u09jo+3)XS6uFD*TQABAVkhaaMULFg#xrMH*AdP5K*{D01@hMg1*jZy}sYDiVe0S zMu~bwg_J~!tj%C)lSF5rGcnt8)kYV~1#Z-6n_`WlxF_TU$=m{_y0O+aWQL%}EZtT^ zM$JuW%P=+awt=UEdN73}c&(TlGgQ=*Eou7HA><-J)RWE5ZN+)G*f`O-T7F2%Ymv1b zEOj!mAQSc%xpCt}^V7&}Q%dWQF#(2F62{kNeV@h^)l}^;QgpVP8N?@7`^AWx1Wwyw zWqV!rL`~IeE3%Tbr;e1lyBY5Q+F^kpnNwhDm8E=K`LWe@(*}&T!s3Q6#tAMh)kvEG zO|4UCg%XLuo?tg?u&B#Y(hj{6K|29TZMEd3#FrRiL#!kB#rEt{;$2YA7^Y6HqSd@qpI4Z_7jJFE1k8CaA6hRkjDw%uIURg66ntGe7(LnVrY-y3i>iW**0ms8F+ z8!kGBEC?)l6(V*6q2nYLzho{X#~I{olmX6(<)?4uQ6;E8m#AX`D^<#q>~&L*PA_s( zV6Q;&d5E9FM&UcFtr|#=P9K$9Fy449H~ofUZEdDx5h+nStoIBdrPYE+!xkdTKiGs#&V z9HhUGF`}k9waAoHA>zJ3XuAlD3kzbR>(1>rMl>G<=6|ZLGKAa@5H(#~E%8_L`rYpL zH&ApGPe;`HmB@MkSlR@O_m6R}+EvDkHo<{*k3p392Li7))AuvDtvU;pFJyS{x(6A2 zBsNkM_P9Ppk{=9m@YAtOG`1k_YqKhToUj|BSp=l ztk+lVFh)*?m`4Gleh^B{du8s?hKPD9xgzrl6n_lFwYl z1U(KQUD%>G$4HZVyb+?A=yT?%Kw8-q|hM8j(>W@o6Ax zpA9P~+v}}`-lBWDvEo57U2_RORaS_YX8@yiD6HgR+}yG0qS28V_e?`Y;{_#ad5IWt z&q{J-TlF|rx@Q|IIGGCPB1r~|B zG+%6>Gh=y4*`n|`WnThWovNn?`RT>BW3Yu|{7a1!otmsM#%DAMe;ME!bhD`JTz#K= zx$&Y-S)t=M))EyGUIBvIhS0%uX;LkPd!>=0MsUrQPL(6_RY0n72tg@7gMYQbqQ;>l zCZ$5eyapI;g8`%0!{~v!*BUEo^h#JMH3DCk9m;cAVQervjAz0Hpmeq?eszJ`>h|+y{*jbr6cn zJ|#xphrrW;o7=9v2Z)1#|7Wb|6t{0PRw6^nhe1h&IEUqGZn-JuBSttUR^5GGu}V<= zqfk#y9^&eokC}43AtfF?M$vHF&Br0CeLAeg$dRb#aCfsh1fMWMv`?2T?WEvh+Cw;VekWA7%0(ZO1_DdXkDUjPm0|UELTgvW$0*K zrVBYIL&~>7(XJb&b<&uo80|bs)tY;8`Yd=5A>oH0XlK|uwp};uhIf?x z$RO+L40#=LehiL|MBb#x9&hJ2;M48<8UDRO=bj)v5NA1(XDQ>T4?XT0drVTnJT(IfQtfTk=z%r-Ar zm;U<)<3(3u^5GI%|CAP4e@rqM99f-l;timZV)rKlMl)FA7-#ed{WG8l(#RMgyHV#F z^YiX6#)&$6nUYgsvlwkJ~^hPkdGztZzr(ePtQtMf_(T@xTJN?ln={=^vqD9h4AZ09&-e}6@JlQP6 zVdP|kMprRP+5!Ad9dx}W;imwu5e%LHAFsQGy1SXtqN})lyF;l!?VCeeTQ)tmsXDS9 zX8xxdBU)YQVYpw3tXqJkA!q`i6Vut1yB(10ki@Pui@Muk$moljaxK>D z6sa-+lSqJHA6j&B|DwjLu>^HDgGRf2+I>q$QNk+J6eyWhH1Sj2v;m@CKw)DQ4d!7c z$pZrq9LF_=F>8pZ@hyyDBvK^pO!C3YJartt#30dexbHeoDo}eDw6)EJE%n3Q@P6dG 
z8zMR*DcMPHT7+E+n2sWT>2yk$PVZr$Xr&h}ovI8O_XOi)wr$&_pE?0DGPPrRbY#cG zW_Ot}qxTV%?$|F9W-9K5ROlu|y1}D1sbtq(fhrR)j|Awf%QsHHS9Y?!x9;46fuqBBVG~V@k+%q*&MXU;B3;9X z8)M}>z(ORMmxHO*TXv_0yTY*1h(O+Wm1&aQ1Y673{0JFV=X6@_mJA!6GKpEvHmm|Y zB3nS}FypIqvfXX;lh$vo|pPwqXz{lJ1F~b zVO=h#oU35bcVzqU^t9_5A?gAZhSp4pq#j5&W6$@acmGL5?**+Gcp}y?OTC~nai(Pt z(xT1OZ1~LB_$V{)PX_L_L`0iusrk-nlD!XX?Uuodk?GgWma{i_3$$wBXhSXWiZXhH zUI}Q%A0%AAo4v zvOwv3Fm8M2`vLB6w5YYs-5lR9M%V+A+?ln&0IVo0zbI}&AGe6vD8)Nw_SZB^(>O<-yK-CHjdz8d8(&pqI zX}qYfT(V8cv$ZQI8m$LA0kUDk@6T&j$zBO$nTb_oT10yz&nZ` zYrLqpR~i7$D3bd)aCK^i5!0VLeY~-vQ?vdtQ#Gjm1gLA-X{-0UK%Z#((bdRqQCK#a zHa!UhjQF~_V%fFl+>=fHoLGx1*q;FjCjKdq_WLt*12clJ?y068b@9bQ)oXTA(3?Am zQ2%M!q<2=@J@<4IKaiwDg3`}`bbbwuH{d+e0MS-k+KQ4paGnKDZjP54%ab|Yvke;^ zFb8oGOqjWS4pQO63wq!Q`)HnP$Y`9VY#$Bw^tC8K`o)QMPbLh_I$jxA zpL5SQ;O1DfD{L73l4QRCY^_T!jp2XXV&z_Fz-V3OCkR<3a$XcT$-ph~8?1-E*f>#( znzwX$Es|aWl13kJ;$Wn`yy_YYi#_*J14X-5dMwU}knpmQm&w&|;J{m+mm4BlW~G-+ zCD!K^;CRmhcfb0>0>jw!?Oti9sAti)K9d$=j$Q?lwm=+u(|s!vosb>CvexA{lf2r9 z(eUql{=y7ygvkk#{F;R9r^ot_EWOIRmvl6AuQl@KSlch0BKJ#@{W`FEvhyba;x2p28g|`}gYP~rf2*6am4XM%?zdHTMelv+{!Bd!%Dt@~q zCK_SNMj_TxWeVPb6!-|pv_BV%6SC@SxOW;j+KmcF4n1g_2$K0OFx8uoh0$Ks+oN|I zEAETSy`a6ulEouaG9y+UzBz3y3-e_ zx{nP`$&8^{8B#tDil6y#oaU{&&V9lV(b+);kyYwkP>!MzOqElz&h;5%MZKGJZi=+X`z&~wB}|RCaEyklqIq?nGwy*eVHPSg6`w~c z)L$r!;=|;-FIW2I49GPDSv#$VWCnraz+&2vO zUlbsvi25c_y1MT}^RiQ`?KU^|$RO;#Ww2=HD2&dhv`G6lXxim{Oib(-1_t*XBSjsb zoEgpv5%XPObRD3wOZI(Cjc#$@GeFe*%KJKMm@SKQ9mcC5L;3GR-n;#&mnQ1D${!df z8ZpgJb<=NhOg1$A5HxKJ*hwRkve9sLo%@l&qQT()Ky+S>dHZofO9xA*_sRjApBOD_ zmeZy(uS3pH!O_9R&h}#4!Ox5lcPN_GuHCVo79rs00H}){7SuU$bH6Y`G#4d{>B$7} zmw@zj)al9Lkx}<6ptAq%d$#M@y1V613IPPak~y(t+| z{s@Xz9|o`RY7Kdd-h%s+VWQ1sUY-qxBCkWvpTW_#sHQ%+&wnvUG%q?i37Cuw8Gi*s z$EmOjP3H8nIgGC(kj-I#GgfqIrDQW&Mvu_H1FH5M2a!{!LH=R1XrLv38YHVk&OgD? z#uv;z_F{5}nERKZqJvb)&|zAH{TncC72YqE} z4b7%>L3!2BAx4T8W62^-B1YVy!1?7aydsIe)&co2V@2(7en8%wl_Kfzgv2E;9=nij zJC&Y0!bs8H+_%(~$dGa*DB3K%Tjuwz_&YAHZK!CoD0Iv+Y9w9-W&pC{{1*ELc! 
zfY8leKoXX&D1SZ3>$YON4uvn^aMw5O4Kdd_W6Fa7m2Uv$OnVN7;f5xEVC~uKka8nX z)P#q52H$n%ZfuZfrKe{a$#jJ>WZVRdW2GeIm&ue~r>Zig=6H`ZvZ^_AqSPm}Ny0M9HOj$ulAqT!-5ocuD1jU5mo<0LS0-k3BI^~O#%Qq;PZ zdSgM1v{OLS+M<6WuGDrpz36UcplBqh&qwka)W13OPh`W-Ud*6wPB_hTs^P|B6&AUl zA*0JA+yY6^L7$6bl01YVWe(VNw={0l*p|QQF{qJtE6~(tN!Lsr2XwbKR&@T;$i5Gr zBM4W^i8u`i4RZT{_*gdyvb)Tf_3J7ia^mjOS zmQkV>v2di{p}XWaK>HMSF>eub!9&_cOnlBZ;CQT;^~V&})@ClxK}s@{As+%d*TB(+ zB93t?IHSGUk6L7$2Zp*&BmQMJ=pG~fgFD|)8)JFQY!OLbXOosA@&X_;Lyl%kX4T0( z19vd)0nE>R+4jqle} z{lq*D;I6x~QKLg&+H2_(BIzz5X)ob$$36XbHNx4k3hg3dVe@EwfC8mALfS7vZK?LE zO;(;8UWfRqMWiZ;0>w8${G@aRrF)?G+rXTIu>H5#fkg^RFZ$0XrmAc##ughy=#)!7DQl|$u zfst|%C|XL0icN~O>A8!I5Y0uuVKf=)Z->4*RK8bNE^0SgZhUY=N%w>d5feaYTQ;`b z%Cy4>(PCO76Qg>G#v~Zp>9ey`@z~tmj1--B7Y(`_xIXU@CJ3>A$w)`~7QHziW;1Etc!~ou+hsql<`gXt*3(st z8{OAfvVvMH%w*gf$xv^XfhbUCWCKXISC##ZRYON7C52(Fj2d~n!P6;F64~l%Xsc$l zXu~NEZK)19bKt1e2n}bXGTV}qD6(IrZlGwk)(EhAHR9%hQ(urrr7_x%VChKXt!}}H z(T&xzEve>s)H?VNMT(>@CR9In(0hI{gYQ*$yM|Gt`Aqk83mawBNW2_G?HlOv6K&bh zE%)rWD~uHF8^wJ-D3Q_x#gD&oHd;ET)}dQ6Of(jdI~Q#l1hkTDgrO0ayt=mOM@Qe1 zp%F4fECZpQ3Vondu(&fgrOy*eWJqxdC2U^w`%+xjDA6h@*}dqM2P6XIcOTO2*9 zT5)Dnik$m_lWih8jg)!Z1@8U^JJ2SQh!OVy;B+pG7a(zOD0jn&GiR^4yvnH2uySf^ zU#T9!4_qUk=eq|P@xRXJ`NTaKI2|#QxTRkyyn1lh8o|ocO5sBQ)0m;Qe!GhIp@xc@ z#)9@AWSFCe0iuJYI0&-ATiQ{2G40`oinhy?>sus6+#`TfOYA2Q<0}$#U2d+rM;a+Q zQ7N201R-)B1&(&?U?Q!_p1r|6+E~$!U1B3sY6Lz8z{6=ID?7)gWuNI|jS-FG^*J^w zLE*-X}LbGc`7` z)ji!9QR}uw_cuKvp8=$rRbOJ->}2SW5aWoe%W}1Qrh)%AAx4!Y|5@PcDB`z5%Ge1H ziiZcDZQ$rOjKY18DJ{~T1DXaj!;XG&L@;0C%@N+aeXdbQrMInl6Hwtg2BtwdP12tS zy80U2In=E7=Eb3M&o^Gw&CPT&$=#&ap#BS>pIvnw?(*$O8qIbuG*Gkwm2`rP9-%J+ zRA*kH%gt09O>q|J_mttOroR@)f zT>orc&Lg98IZuDN@uCjtTJ!XjB+;(`+MhhsBcgI9(7n=N(I(O^Iw2|=RDV^X?vE14 zT-?3d)T84_(J_K>!OUwQuAZm*i51!81mpNx(~r(g`o^h@67%pnV00kid(Zr_m)9F4 z>ak}B_ml|r-vE8Bflcjp)62in?`TrhKjmGg`Tm>5b_>Cj-i>+Z{@NBO-I;I@?L{Q zEq1qP{W7OV>ia;|l1#jeu(8U$--sKdgWqbeE@yeGcJFe(9DyGIP}>F?J-6#|h3$jJ 
zigww2dp8jhJ_Ld`bw*#L>ClbA+VlSzBswnig;jC~PKot_c4uUp8x|`y-OAtovbq$w1e8V`=_E5eREYTzJn+Y)lcJLM^9cA3N zj2Ml$6b@mD6k*>6EI4FxJ@RyA`wp3|e#agmL&Lx0BcJO3#^l?XJ6rdWVq#xx@vPJW2K3a^>eV);QA1P+^##;!X7?Z)|5>azc6C7i4;Z=vT|hp63o006>fHS zzZx94hIbw02>dmG8XD4xTjmgk`;8H!6YYW-Tt`jbplBSe;*V7R!Q`V&u;@sos!;ro5ZBJc*Yb%k$AjR1GD37h(s!%5*P#BNp|4HA zuf|H&^e1WMe=$n5LJHSnQ(C0`HOY-MZSL;Hkm&wqoB>8!yEiH=LHWN!{&*Ry?Uj`2 zjbxG-ywT_%#yapYV81N+{{&x~x81HI)RAVJo4x*JXgAR6HL{Y z z@KE_m%7YCSb(;$8M35omT7YBnrMlL{5&(n-nt9K{w5A*&=s3-1L9I zOytNs3QQgGbeA*+DS2aXt=^S=fJYlJI;kkwE}fPl>=?jKOgGbH@A+Bh+2-?OjT*ht zsl@%az+a-u1RRG1=&UBpNjbnGl|6h-jdv z@0Pb9Lc&QP_!Z-k?W4m}?&QIxi`*-UQ2G=|pHfgg`KxK&%?ugc&$wm{<%OAynQDtW}b%)d~O+V^HrZ$>nGzhpA0D7xP znDMz=n|8E?mCyF5ej3#CUOac_-^LixVYAGOw@v!DO(?;y#5)J%@bc}95^czhf`exi zVMcBb#Ld#JEjO}=V=mKkyvF5pV@}B5ueo^JUwS^mP$tf_3?VJ*s><;oX>e2Z+KTJS z-5Gpu`WeQL-t^YbFAya2%!HZtT*PQ{PIi_tqo%ja3Gz~;ZAfUoKscD`28f;RY@|x~8m1_*^NAR77XYVSoo{-PU%NXPt3FTRyFgfjY!V6v+)6Em~lGVF9l}{Y}u<{SY!1ugDEBNx0dK7$n+J3(amK zMb>7pwCZ68^o?k4)F9EgtN3mf6G#-Oy#?Bt>7743Bj-oA8sWgEccw(rHjs3b(#5;> zd~bTWDpvrF87Ddt_AlK96*4XYLzlcX2A(<}9gc!uY_w>5L+n1e$g50|EgJS!J?&FH1Sat>Q6#HvzRBtNNO?fZbpiZ z6ord<88s58K=l5e?vBBlbJtB9DVnE35>-f;0YTk9U zWJ+zg%k5`=ws?u5q8>!q;zCA{&|QFPQ$T+Frk1-KDrzd`<*Bf#B~hUErAdaeO+35- z%qY=juvY6%zX%LOU0z^XWGs}zgV*UUGg!2u`m&VBFkkmdD9O$SnqXHkN;E{VMueYQ zMBN)G9Sto;=sUG6w#g<;S2beXI{8Dap}L9{$=waEwswsaYW8Xdiw=ebVZtO^bCB2h z69Q@2^f9`$+*@^Z14L&AxXLl)BqPEM%!8l?!M8ol!GZyz2BEOcC3#o`!7oybP4M2w z#^B;bvuIUe@-K(DHe{}F!!y>y9PSE(L>qG73U?wyN)wb!A?bV1mJD%Vg+vt+S|FI4 zxS(!g+vL~jChGGPUWTx?AdM3;Ytyr?W8%@6Y;lm#3z6bLQK!wS0YBhB*BzWxSP?;p zq#j7>?D&OXzhlL%7$!O;FC4#;(Nd#*L|*ZO+iQ?$zw5J(L4<^TAm||GSHCz} zlXX`;i@0i_sLfAL*z7D~MvcTPLDV`jgCTfPl`f(LG1BgvqS4t)O@ZBh zqeb2LQcp2;oZx<7X>D1~IF{YtIMFs2LRYr>DsIc)RQe-_) z*eETAvbGj{{3*vvS3LwtgOOq^RM|ka9ZY zJOLavipgTu~)KgZfW_zLr~G zH+rfuq9(q-8|BqVds=^PCU#7Wx~Cf{nwwU;W!_>a5ol2V8KSeo7#z)#?nip2e7WB} z3y59Uk|>-kXf3&C8}am5U@pC4hi6H8jyz9)sJC3fU9>H%RX^86quIc^PJ8#|>i4EJ 
zDE>T%Ba$e;kl>wcs_MGun|5@;Bd_fxDE$IR%kGDRx{G*A_zO*SXg$TihpG3XR8qyO ztM=q2-7hxLbtlzJQ2Hg1hRZ3XfD2s_m2)pO?R6J%(4hFsAdY#Fh<8_Z*KoIkgc)9L z@=;TsFXcpqfL8!;7z(!Q0C>Yc7POo3Q*$ zt=B*cV@;}f+w=1h!F8`S<)~@SB(+b8pw|I}$ixzhT5rQ}uQx_?l*usCB9wmvy&+cenY0H1%Dx4%@*m2-CF{;_HPJKI zC9028@@nrKB2<6`adHIihM*8_XO?cfYmYz(>cR>CpCr|aPKwM^>se~JVoCJQFIve$kuXG0@xL&v77gs{Q5df zpAtbI0LWKkwOi57>pp0VbyuT_Q2s-Z_jc9W+qU~ZQ(w1T4H^{xFvNX@@}4F45mR1w zg;IdBA5F>5H>(R>_pt+)r2u6=4q3DlX+68E-MagPsjjb`_~$A5$&glbclrgNGS&4J zK@gzqr&DQ#$KgI>s_RZGo~P(%A?n+LgdiodErswo6JB>)2pSasJj4-LlX5_qOm=F2 z!K9<{=#1qw1qy!=!oJ#SGRJjaGTC((1)itqmm%s~?!4Tj-*sOx)%8`IGzJP#_N$PU z|7ert4UbiWvn6e#>n2nTE2#L>TRneMuapaucT zejBpBx-jW+-!av7R~MeA=yxHCmMb6ihm#CC}H_UO%K%mz@*n# z!lppsA3_+@IVpHK9khrmf2wlX<&R7~8VkwS0KL)~=Ht$&YiD|F9wDCMef0`0S z+jc)Ya8W!@(Vs&U5f$<2P=(?_d*|>MrXCFzWr`y&M9eRNL92!v-o+BquS`B#B5BV# zJWthML)8z`GITU*RQ`=AuX~U-5z7A-^1cPiT^S3#MfW??UUv&r1q%Nj!oJ@~)7I$f zA541P{U&Ho{Era#&0Cy~5VLfDGW~To@01J?e-7!FrXMQA{l)axm;RIn#s3O%KbH`& zzemme&E(fTmq=6y_&We-i%!m(wOev!(LYRkU2QQuPt|`0Rqkw(_hh(#9hho(o~r+b zs`m|KR*0#6+x^Fc*Im*&%{CAMdk@-&J8=G#l z1!tO?K2FV>Kr@+9>VmktsR^%puoAPTB9uQ0^1d{JM{~4^ue&t71f`FGwETxT1V`9o zO?Q2b!aq;Z;~?rAh1T8irn>G%K>^C10NER&=8^z`_a09){R0jVs1iXZ0VMySr*MLK zvdOQnQ2g^0Jq4n^P%sznx|^Bmx(kH@l)X7*XF_sk%>F+-*#H-2-XRp!jVe?)#n} zVcqRad)2UXUJFk-I*Y8d3VJ% zn~mK=TdJ+v>g?3^_A)LDcV}4&HcD-k1is}tS9Pmrc!|Z{QuEY8Ii_WUNV4lz>ljM9 zLlbiUjKmFPC)3$RTVq8p(8A-yg%74h)Hy(*RA(CdhUD6=y4)ajg*(?MYgAdF^yEnr z(h?LuFNn8Vy=4^L#Kg>`JKx0DaKD3ic#?qd0Wt(!002B>pt*+%XRqBIjIc&)4unkd zO@s0mLjFb~Z(DkBPstn({kc0DV~yp=z_90;4f|wjq}>TLOjl%Dh{76?`H*Wj-JOlJ z#)5Gm`De+=>X304Fm5c2Z3ON(!D(CH zFtlxEW^xpFJxYU?b9`NSspqg!x5yt3)PFW;dn*?ud=ua|5zSh=*{s)kaQq3td2^S#=s8KmN-`w5aH?*m`dsX%nb-UGty1Ure8{_PtKyLVumLqUG zfM~zzr23Tk8`0gkVK0hf(>i5Y_QFiS1QKx5Tmq!QEp>59*X=O&=zqyJO=2g3h4TTs zx{WsVZiZUJQ3?$yV^jG&Wv3tu>r?HnwrY#j-VlGN;72!YfQ|9In1FAteRAZ@fQOJC zvaz}0rsWNkwce)oJ~wNy0mh(AZqhP@?961U?dn5wjqWnvX6@W1#u+qI5J)nWl_L-C zD=iG}k7y1_z*ZJXhPKq}bG)S7xw{*4Tf87MBIIViNo2{s6l{#Q!I_w|T)r7!%Vn(lp&=#+{AlH0XIT 
z;=_0445sN`NE2Pp71xt<2|Mex4VzuX=mVHm+0=(8N%cQdh5Gjn`rXBPb5DI}TK<`L z)%EE6C#j!Sq5f{@pCU!H#H-15=*5YmWn3LPjLs+*ddcrx&6qRs63wd-p8qnBV1nk5 zAec4wV>EJX|E=uRi@IIiQnAL4$zW?tf_VFs79?{XOk70KY%eVEIF*jb-J$8#?)LUV z7@!vnJ{>QDJ}oAq@7oN4saZs7(4;hQ(H)wSxQ%NVdjPW(o+L7SfD8eb1AwtIxD9Z< zrb8yLFwn+$CW8V&;X_)Ez-9o34nEcr!vwEO?n`z{>j4G@tx<0x(sBg00EBgyFy^wh z>7hwX%3RxE130p&r^!owrW#qxU}5A6uEO}z-2AwDA+EC?To52Dd`PJg=YWH+wYR&_ zkj3hu#n4tqwvL55>>74!ybetWGf(@_uS$FmcntptTuRH7=;w;T)`-4@F3HE3K@*_v z9_Ye34i!As>NRl|O}Zm*zS(P(0i5F?l4~UyMPl~>i>|QDEgVCWuHNgdPU5{=;*hTz zbAZO;6^JZSBk#(PO$;w{?WLjRm1YzFPRhTVcZ|FH7;*qFIWIzX{!6b+{C$BxMY7vx zhBnLPu?w!c6k5=JOUD4?XP+7q()VqS!4%yODLP70G*_=Rma5I6&Gq@J3^$nJVyEi; zEgb{c`=CL5_>dJO^Z|fkjM+9bzFl1-nSO>J-BrdMVEzY|ceIF2bW)O}J`hySlB%*C zF;{g%V*TXD8Of)dTHsgiLB<}yL&MV)BH2Fm%aZ?K@Hr9mX7g^FdBuRc+L&v^d_xB= z>{?!ln1=v^AYyHCrFDgbUgZDop@ukDeiNJg@j=Z~qCxS8K^(@mrqQoj1qYqx+AA{C zbPqSk8Yd4zKD>ZZ`o&0l1Zb$S^vE_xG{3NTY6zXDtX38$MvX%&8LpDB#m0 z%aTlN!<6>Xh8@5*g~5c_@FAr}++%>tbj8LV2^F+kn^rHaOI+r$Mm*rIm=PrOae$tX zE81tTvo`qEQEM zx#V#I`(#LY5-4y;G;u>ojQGh08KCp|r&Bpgl*oAsIJXhbbVFuAb$ViTC%ff_k}sxk zD7S0v_oQ{( zq~IRC@aM2j{mFQxC1dOWlToP4B)keqK*b{Jw__PI(D8a}#l6~?Yb+IpcAJU$B_ag8 z1^~<_($QS50>D9etziamEYk|UkL1=WM3T_g0jj++`HP*^rdWA~f?sdg0hUqL#3rHy zzX4#hVe2Onu61(sW4ac=_-7A?$>rh5{3w5iwI4LHE6L6BGs^L3KLw;5rw-x0Ek zAebBuS?LWSd1Ls=z0=48nD0b@$mFBx5%?|u>0ikvMd?CAW8=e<=TxSrx4U;6Zh!@A znH=(Q!ommBBI-Rr9V_Lkaq0!E!DB&hi0|6w(%yRwIe?AJ%MhLa(zKa`_aO=J^Z3>Z zv9xk#yMmwH`;9!n*ukfX%j=Nx0Z?u$6m`6Ijc=ct9PzG~M1k~k&dvLv@vHJ+#Ja1S zhtT(Ot>4ibgvWG#2e=10NA@P+`4Y(J^jhen(ACGh}fQa`(o8TyiVJtR`HJOwGrvSB2@DxfeyA4h^< zon=e5tdh_RMoV;`up|u7?~;eLJ!Z8C`eewttZwzNb)Y*`!`3({ayK+$&f%vFyM|pe zi8HFp8s0=y(!MPcW*R^q=m`O>d1x#_a(y(5YF+>k&xjEH*X0Mj;|PGfPpszH8d}N4?s$dxUYt+ zgufo&=Duc>0bIZET$Zwp;$Me27Ljy>Si)N+Xvo$#3^?!*L@hGc)@C}s ziFBMU=_nW+(qHAqhi_T3=H%J`-S|+BWNN>S)Mi}BJ?+MvxRBqm^c<)QsY0ZD7Zj{6 zz-p$$?$g7&HX!W&J)`}v!tP#~`0oRckSL}do3O1?E|UF$ADsp3m_5Uv z+)s=@!0fhQW!#j=`6)PXF6DaP9)kmj42h((@RtCdB5G)~W!oLN+2L0PAD~i$8hnBfzzg5Wzy9lFLj?#- 
z&##f5tUocjM;e;@jp6?re}Y;B{T3jMhl5Cn>6n<-gx{|YYl>-0S~F~i#4o@}l7n{@rv5H3D%x_=mafDLSTj68duvj2oE8tt;Y>Z>O^NAQ35FH;`C0hOH! zn5L+DT84ms1Ay*#x0U)zeQ0}QzRjQ9e~dA}%u+)C@-Wk%(;?)b!vz9^8a$oxZNYE3 zhWtuTy$#UI!{gcmgF6zthNwdQL!pn2;N4EM(W{@0z1c0@QtS>h$^c_fc%C^49}+Rb4hM{7 z=0m+<_&LHzgOr(nSj#M{M9h)EAWSN=AlU}i9h#{&ub64OYa3+%pDa8}O8B5cgj@%Z z;{}qM++n-dW^B{PUOoxVy6YNoTX{X1K-ruzFH8RQz{k91<_hli7@E1lUEgp6j4?g` z95a}RQ2YiEN6a#`x|+lQTJDCXKY%$XYIc+%;YJ|9#qy!eIai${(((g0ni zpfYMaOjyx4^$8(QIt^S@4{jU(aVHvS0LLXGz?@~iQAy_PBrvhW+3zEcwWK*>s!?tF z{lzC+8g|C(E(osei1tC4s#B0ESSUANyI{kHp)g3@%n~rbSm>W+_Om)<+#C#q>D$Xa zom%N)kI(C=h8kcygh>!)pVES4-U3Y64QoeO2ywSG)&O>+uoXdugj<1t{#@N%8yem; zBG;}~-K|Z3fc}gJnT?bJrB8$OaY#IS41D+T_LYrhv#QSuFf#BaI^-oE z^RmR>7I*|;#m3Ax7POj%rmOSy^cU`SmV${vQUEE|zTfMR*6I^yI&P12z=x967YVwK z?GRs3e{iQ8djPwGVX(Cj_LT$;6C-N~ESOMhFIXBo!!QGQH-*h5tc;xr{gXsLbXJ*% zYJ^u70c4>^LJBf(t-G^~I2kY0prt~r+VEAeJhQq1Ny$&!WZzs9r;Oa$mVg1Q-z5)wW@hJZT( z04wRYt8A8$y)7>TS{Ff2QYt`yRv8lR0s^M%*qBn2vw_J_x4Dk*&bk!`Y;xLJO7I}EYLCG%m95uRp7!iOM5&?G>|2q?8Yvs;EmuKRF&yqf{H%AZAg zYf6R8)aQ&U*;8O6oP`~CsYMX(ZlJlyME1A05de30H`vB_)tCTT z_6Z9GDo5U>;K2iw0oe8wYvaC$(FPa=Q{#A6hLn4Paw%e3d7(bE`?-3He%v+opSv^TRLvtP#fj1 zB36PTzRU{c$eafgBPG;m17*`?u;&ZL8(@V!2oM=QV5_B9BW@8m>_6}GEoNjrd~T{O zQE{2nV2fB-fM{4E?iJ5;pDa_;_iY)BX}lb1gir6sesLRc3&h+=wY8_(byrw=HpbIa z;67yKNNj?5OHs3_z>w^Z!OrQS-T0~^er)oSTe1{Pm8Za@N`s!I0H&#hG@UH^VapZfg~8NiAJF;+|XP@>FaEF&2l1FDXTA2U@q)|zj-j$sGr*%HD?R!+hP zuSARk1{V3s#xnnMa`n>2GMb_58fbvQCOppkg%74hR1YXD2;gtD$JxoU6=Mxx8N>5r zg%8Q`GQ#!%#xX#rFjKP6tG#KZ*K4=7y7tPl+iS!%W~jl_DcJp=7bJBbsHn8COKP@t zMXSBHUhNE<<2y9)Q$jCNg0% zdzJAA$aBF&#_N#sKu}JTJcqNyx|>ukS>=hLOQyGu<@O#u$mlcjSY+G_sx0yFA%NA+5T3U**@(aYPbPP zy?7fPbjWxZ7+B7NLw`wQPA$b*f<~E%fC^Cf5fG-I7fwlb{o2MOO@4shUN9Gs zwGCAw=TYF`Bw$}xL7Xadlt&x$Zt|y*rqZXxs_OeTrO%W+1}TBhq$}id8oVtB(N}e~ z{jrvS0fvjd)%J`QVUGh07F@JBHdPF`6$b%)XxSsFH1~@Zblil8&wmuTV#e7eJ-m&s@ zf5=3(uX4lxQ;o7cP6;w=hfBOPbM&XXb;@GXXzYfW@ct)W@ct)X6D~}ucvBYXLtJS{QkJp&g$lUYG!)6tE;QK 
zX~9*Y?CuymJ2W-8iFEJ6(;dHMG~Ul0>B{AbqCSIEzLMx5a`yO+-TSlUQJ(3%gw9H# zds$Ku<5^^6jqe1PEXo?+XFFkijc-9i`p=<0Uoz%I`pQXJB=@-vNvK&$xseqmp`J$+ zYZhxQ4%lQF#%8Q$dA>7G9{s4yQ*tF{r~5&TtPFSo4dAObfliP%SPrfHV|byH6IvbS z0v|$kWT|_(PC-)C7m-RY&Dy3dJtWv*hT)5ymvDFOWW9KHkSPiC5@J{wZ5Hkxbn%wC zw)ab&lMrRIv&FFklM?7<1kr~_-zm;5u(jyI%bk?qSNrKg`H_i<^$N1C1(roICo!j& z15Nvux`!vS#a&+M)P$g(2?=TkHLAkDig=E7l|0Hef)HqaErwSzcs zD#{7?8UopAiBXr<)Z%o&8XU}=2(NWqLW>vUH*tJ;SWB$ek;S*6I1Gmn2v_(=NayvA zOK9(k64~R-r$D- zaWu>2^*=u7=mam$)O5{uFiKj6e29i9wIDi~JL8hf*#NV$>CpM0h{n~=Oj1*c5-}~Y%DJ-3G)eJ z*tIrah`Ak_S!|x^C!Lqzl&zh(V-?!(@`SK&Iy&LHT`?A)1btcb zEn37$YK4*P=@r}p^wD9EhRHYO$Kl&<(>|Qk;NTWCWmVyL3`2H(hjy{@2fnDJ|-acJF%_#p<@%$b*p;1*6NqT5E=C&8pUc8wSlVf&8x^A>@HcQ5Ps~IjER$& zi)A9Rzz!;k!v2IE@l z{2<0J$Y3>5Y#*i&GW(@d656xk$pRI>fSORhB8skl=C^F_E^Ncr;nPd-{IGNd*N^(O zLlYYA8uw>dQsm!|99>PixAb>of00?jzjbCp@G*C5v&$UP6YzHgmYy8f)(F3M)Omk$ z6cXbPWH4+nf$bkRZQG6oHIW3w&xP9Uk4{W*;TLi8G_nvVq{|zlEEE1j6Ijd4oO^6* z^BKn?5{a##wVlGB9eg|de?zs((*+#T5#}$%Y=BI)2G|^zv6@0PymA1c(C}BMCJgRW z)nq~S%PK+!{f!0{!%)nxz>225@OL*L!K1dwlfZUhQUd*hAeN)7tk1T!HrPL%l2C=$ zye3gbgnyAx$i14yF3y8vP6+>YM8fl{MMy`C|B%5t#s;x%IQ+j3N$@7^WZf-0$drUR z0-0%*n}rEw;R5(N&Luq0Hw!W%T#y8oceuiFF;paQ$F)>1qZ;bz!n@ z1{Ri#D=mV9<5BDGT*QsI8%}I^=`ev2p@PbyJLxbPc~Kh4Iw&9fvCeNksB|&6YDcqG zxsdKnemJBn11?SjShTF~osIT}OE@&4XxYg^+Cioy%q0zDLFuh=G&fvoJs3M#7(2+6 zgt;^^Y9W!dg@{HmB$9b=mvLZ93mIi%DH|mz@?}Y8RKl)aPMZx^T+X2zo1dU5lx}8G zPQc3($a;rEGCQYdIz2UD;R=pQNXRXX#^fRbT+yz`;t6qAl^9P5S8`0k6`6vr$PS8f z0$$m!$hy6owsE8@T*XldjlP{LlpRDNL9R*&$T~RzZ$Kc!k5(-d_Mp0<;}Y6T zJ6||E$i)OZlCZkB1H0tS9t<~f-j3$WOL{w`EA)+tzAj`fg6QMEaZw_xN{@0IPHDEm zwQ5IxOTseZCN!et%<<9RHr&*q=hc~$ju~fn%%RY#8C$iYsuXJY*Vwo7U!TNxyh>l8lDB0QBi&s_>XCpcEcv*>XcsR}x z#da3G>R!gw4Sr4HMzEdNvBk5Igqj)t1ue0* zl2xc^X4|Y+PdGkob5KG79NEU7aJ(qnNntBk_O2#kOg5b4tb}^OBy=O~AbM+=o{%RK z$wncc5ruL7Iqax|byaq(2bc>8nIF2IfTs|6O#lnt)$}Uz(PMm{pTeom-GM)B@Ma1+ z0vC=ey5jF7pB16%fGNaFC!Cg2p|c7S$Ih4PE~;j2D?H!0PpNqhyB&}akTnNM3_IXU 
zm8l6gNgSsjWhH9M)}1@UX^u(gfYlU5ib#DA)!8}~W5Fk5vue$upK?r6KQgb&CCj9Dn=!7&LD?TMfSb1x!CH;1Q(`1(;>HRw!YMqyN2ZN zN_h^z9GVVGn1F>9dYRMNw!?6igA$&-ofU7EAC)D=y&JhD?{~hpjO^mJ%B3Lb z9;J_jK(!<4!=6*vFVhq;=)pwS+osC<~clea9^P zy0#vaogRy+C4`xlQ0Ed=s`?LUEvtJv=e$<^(h=icWK`3!=Het)b?NWzu!NF7_K6i# zg}x8b>^Inr-m%mS8xgRks@^Nq+3u<9c=U%wKLsE+T@B(&G zMuZ2DumJ>%Yz@z)iygr5K!+yu`bYGYYR|hF;lXaf1pcm}8n4R4 zvRa9yyS7Yt2u)yX$izy%?>Cdz{3c*HUQ`q71k8m)j8Bd^840o zX`zQZFeMu%PX`*QDE1@BX2VDq6W*s{MUQk;Lc>_{V=@IvKZ?@$IbuF6pWDIazJu^+ z7f$H2l!Iw5BJIae`|8jxbpIF(Q?HW2u<%$%CA{PX6+NQDaYlw# zLpSpAZp4}R`-Y;K%dKx;50B41friO-Eac$vW|Wirp6EuUxQ-^J`{Jp7TyecBtef*B z+Q4{J9uU~8-9f^W9h=bb=VyzXA4V~uo_T;;=-qRjb{?u5DoFZyl%|(fSx7T3dGLH^By=iVL09BQ zabZsBiTDB%>CD*rPh3U{-M|+*Dhv8r4OsEcm*RW1WB1^X6RLi0=8%m=#&>o+K*qd= z#;^pk^+rRCygqaX=fZ0pp3u&g8t`01fY%W~U)o=5R{N|cvYj?x@1%s@kBjK``cYX@ z+&7TRE~F-Dt#Lqc0{N7C`6;~7nF;T5E+A-r=z0Rai9lBI99=;=55i#@Xo(-gn;n@j zrEV1x<#C%KK{4M#W@(fm+q3$uPD^+*YG3XsB+A=JVZB&P65X^dbKl?YsD$S?Ht*CV z1$_sh^v|&3!~i8mvpux?E%w{RSVKR&b3I^F(Id12P9P~t3j8jDSzTfhL}%$(Mol|Q z$A@=2Eup&1P8F;qBf@(~814Si8)MozSCMtWPfMy4aiN66qS z;h7Cu7dp$0_^6W-sux#?Rhp$*6!b*=7>V>%R}L;%PF6OP@Z*k4i00Vov20egg!%+g zHw0>ENt6AI?sw+6{)UZJebV9ohcg;lsUbmGG4$;)Ms|IQcH!rU)z$7?Yu~;dTf?WF zo-o?3)8bt=0qK5*y4QoQ#S^kdG|p*32CiM}XPtIx^R>EIZAK4As&i$T@j04tRS3_n zA{f)hzTbzjK>VCCJ-E{G=bfHVsxl2-ZM~kDUm%l>mL(uEuYY+7UI_LPS`1%Y57H#W zw1bkKkY6H_^=mfkeQ(^s>&uQxsAOs;%tsj!zCr>k84u>ioVTw!A|Zt4=jw9&NJ^r7 zjTAZ<*~F}=HLO>>xJOgI!q**@&{eIOao; z^(zYLdW&CFlx4#AXo9)r#m)S%k)Kjy;ev6QjPJV@iEepOOP0_rFMe4D$e16{7&>;9 zRgc0Cot{vJ?PO8yU~gPMNthoI!`I&@CU7A@o174S?63r%)g?spqoSagKOys)U=~7T zSU_YUE^EAlpTbWayaRvO(5Mu2gf1LcbjAM}`RtHbSJ?(lw3HBhuAe(RAxvr_BHKYl zQP^J)%W6-9rw677t$A#<6@KZkg!k7^70r(*B*w4ExDglwbg$5UUI&ysW9^A^ zx?y>V+p3&C9Zp)E4s+qx&fe90mnA7~lDO>o4eenmw$@-bdNo|a=eJHyD8+Viyy#90 z)n!V;{Eis5-!)TOit}m0?;ShUe8H)cW{L!v^9P#4cHBm@I24Ap5p2bgKe`nOZ{E=HzDGv_CubpGyWYwdbB<}gr={4K&X??kDNFA$ADGDJwqC>m3B@=U(52*ut|#C{3FHHs z!Gchm;mI6a!9o>-a4}~k1p0WcC}j}=E^b#;h{_?z*Xd(uG+e?d8=G&L3&dw*g9CdR 
zGnAZomn4q^0p`{2Sw0jl<(%6zbL>Q6>;Ni~zcl6931%cxH*4~6HCI2bSsiEUeiap@eRXQH9#LB| zkIqtSg1HFnHic_ABB3711w_gZT~EMk63EC`K3{k1>f%h6n{_QGCKQESLB#wp7Zmc^ zL>>j?NdK<#I$p=EIJ5cIuFuaalVsj?X&xUhmpb0HJm}0IF(zEkZ8*N!hSDO*CL!?+ z6u$u!d^OkB{Pfc7S{pHJ3@f&_^WY?X4%c_&?$Jos8L=wtD0NwL16rdx_du_6xYg(H za6`u?ye2v&mgV?Sza@U8iHFYN&TMPXHvA4ZT90@-tDX0(KrLoo$y z3s%yguY7YyCo~mB0bP3Wi%e0-TM)^n0Q#Cu(*CuECA z_A-mzBL+;5c346$(@qx54%pGM-QjLUj7ks|)Zx-BEn8wI9OIyb2FcEfFM%J~2VK&! zWGNKvTVC$mkM%UU1PvFmInIFzV{RiCvMEXmdpxl$FI<{v7aVyFj&}iz=*q>D1*zx=vXKxrBq+?&Yc2f_n;ekPki-+?W63on zzM10mlIdpiS;&8fi)_o=ZgEgT)n_6?*+E57*sa7e3DW}$ws3c0Hdff??9(y}Pdxf) zJcoX3Wo>W(tJW=tg`;6~zGTNvSd3g!Oszk+6_eeU!UaMgmPF;oXl0E zelpd0dp(zLQ&?DKWHF1=?{HK??c^faql~-aw#t&?o3)6HwQWEw#Z32JCw|&gpS(Q;qs($I(GU0SH0ku)S znRM|W>~|ARX*OY2GAq*z~$2AkH3%F51(q~YbCB_?- z&dlmUmgIG&BNAq`nt<-79c<0jgu5$oY;KGBHOGas9F$O8My2T-C&JzAf~>yfrCO}9 z?VN;gx(Rq*fv(KHEy#*{cXHXz_nB+a((hID=EJlzQyO5MEKo&9kQqYgCs`k62X?9J zb?0`kpl;J#?pdcL)Z(TRA6XVxs!9qxM=*QKjI|(2JhQwRu6oB|DZOQ!DNs>D;`0>O zt+NGy<21go;QA>koOW`&dv+A1#5q9D23$lOYh2(y#oQOBwN+Cs=1#INLf652@#hT{ zN>xq#>X%i747vvmV)Ck0@MxKOjH!i#Za~77+t~u!fk_F}BZ!Wh63kmMeSOgx369&@ ztDV(s(aC~_#Fr?}Zcl%8Caa!XQ+qe<*}HFY$FA+W!?JS{T=#-Te2%r6;;xX(X%Ch# zTih=`f0k!EemDNM;aQp#bG5Ps0PL$;br}+92*<|lPqP}MwRP`St9klvMZ&0URY!NC z`eiOIBL+0$iZH?o0LOBsLl?VLhgF9swET8{sWjzcf}LY95Bf7&Fty5^Xbw3nq0#X3 zMe`$96YVf*d`+yr+KmBD1%cL_mGGL_*+SWYNeOf=L2L};+nY7enSXpwXC>6EB>@3T z-$Y3Ss<*TaJylHlUlnWESM zWdwK#0gS@Han6P%9_s41O1N|RutYpvst=hclSEX?|!^P5(-oeB+7{J1QHnjjX@td?F;B)O(~Tm zJkd!BHTqBpsiG#}lL%zjyto0->C?T{4l5$e6X3Q&Pj+lVb!0*+cXq&Sg(|ATKZST3 z`;j-x!1Gg`m*UN$Sc%0VViFR68pW@VYqOq$_a9>C7WdVhf~Pz2bo_loeK#a0{Lr^m zh-`WWZ7MnkQ-d{ZaxGW$nQp{+bq;bd!Jb7JOH*v}IL{w*fLmMQzv0==OL(kh30-md zt4vkk=Mc;)zStB^b}Z&@5_I9aljs=K_5SCcjxhR!fTzJ&>q^!BHBTwB+ToGVP_T#L}EBw zzr*Vtk?`!}iPQ&33{Y*CQTJlz2fy;%}ljYYeNkcPbAuyxH{= z(uwU{5$qre3Gx;~ia~`vCvD!c+{3Jcw>l=l&#iUjOiG})5p*$Jjnc}n!{OUq|7aG1 z@nP6^%lds9X-{2NhT_`9W)SR)_Z6cUmV+Gg57N}V@K?5qUuxu!mkG9rA01QuG)KbY9MJ$!UsfNaquT})9% 
zfR7PyZ9cc?Xzc6`25j@nTl0L}feC(9joVSHD*h+P$B)K&){q~vV|VzZ(~iM^G`#D@ z0iJlibU#I1dZpOWCUb4D$3Wcq?bEKmu~|PqUpFv6G&$iuLmcN>FvP;wGInp2ZqLJV zl34hxTkwC)7R*4BHvt1Wo_wE#k!7EwWqdKMP}7_k#6iQy@h;TfpLcr_p05d&-i{ho z;lDuq)q(dcv$$`$l1lqrA@+;T9uva#vq$8pDF?$fMg9`WtoZ22O-?U$<}mAc4%uB_ zc3@JKUl>^^s0i^DBG?=bI=!WiR-xFdPvNW1Na*chfz$ZuMx%a%;Z;gWU;W) z>|mmMxVsd-?y!VD^lh?gzv%?rML)_psn}gdaLGp&w@=LvMCLQP>~ZRkD0+JIpRl zT&tWTcRzMyf@4uCA{8Z}enQlhafNv@Fc)davWtJ}tb}GgSI`CKhq<7TKO>UOdS6jD z?!<%OTll%N5{iCysz4fwAoA5jD5Z(;g8N8GYsJhBU-y* zU0LuaT2KfY&$hLIWqT$3*{KPyWR3skO5*&5oa^CAIczw*SRJN6W>596j=c;1y1|jl z#S*8QIY&nQjYjeIQ>VD@(Y6qlgGELE?iNj^SyWWg;~yt4L;gWSEP%9mbud_7Vy*a3 zCnp3EO0}YENd8}xr(07DL~{-Le>))I8QaOaBs*Y88%o0bhZwqzcD?u)Q^YW!*W&b6 zeG30|WJ1)(gff-V&8!iZ6-OWxqi_sYN}gJZOQBr=U&cv&$2C90(h|#=f}}4*+5n(lcpIo#Ze2B^;CB?%Me(0d|m!33f@s*j&Ug*Oq1U ze6S2aob1;RmvUah{Tm9JRn!E$G=Z!;SsbPYbN@@e{7#nhCM8lEpY%l2@c8E19fFV4))fU9lP3m0UU@ znpjhwOh}X~4^z5J1M~o{a{eh266LC-(1FY{dg0As(GJeH;q;TMIVzzfWeO^|b!y^W zojkhazTA&0%YN(R_;0v|BNJS57b&e7R8|!Bn#3|vl-+f>C+J*X%UKD1-J0hm73p7_ z`fOi|8Kj)NSUe8baoz;}w4ueH9iOUN(Uuk0r4{%oZfqZ}=g@>I8yTMNDrcW3cTdLr z&J9#&vA3x=t--lnY#|k{?}R%vUzVS&a^go(N>_0Ma#$qM;acGcW@`#Y+05u0IxN8# zb_K!wsGuj}ktC|Gt-T6nv9TBPsltt%m2m4yZ7HJ$QAB_n6HxT;cdrhx1N)W(9qrzD zlyefU$z1w)|FT(LB|RZ;V%KB?6{r(=O*eH`LLFu2ie(2;NRXQm!bioj%z_i6d3`L2 zg+R>B9ksE!T$qATc2JZP@D>EpZOE%$UxN6Sj!T%ASo1b#>(%)h#4WOJ0#+RN<~o+G zB`gWUhz81KT={y}@{&Rv{30Cf7VK}n;DV~Ixo|v|qWf?w+NQ`xKHEQAB{VJ|aEzOh z&_`Z15gFn~d(lB-K44)Hfn&+#K)<_PY-*Keb(|9uyc|DWL_abyUE}d&@uk6*b;vT~ zsyzM)1^L#_OK2$k#25w05B*ao2zmmctP19FU9*cTEi6QXf5L5?mhjrxxw<$zh(dzg zmJqt53&6Kd!Y^KUxScZ+svDguKuJXUCsH3jM=SzXQct z1gt*Txx5hP=Y~5vBH<0R(*>~u*AnVZL@`DYV>PX88cfX5ytBg+%3BQklyWqSVxny% zjpdE~tr!iB;}jSPIJ_1%IWM8SnC{ej?*WmdVib5_S_0y?--RdLS+Fd{^U3^CS9d}AI&K}F~Ga3?> z5o}qqYiin>dmNW%EgJ4G8Sa-3(Y@O5XcR7t{ z)70M6(0*8S${3Hf z=2EIc!YmPkcsDTZeY-4@d;zH zq3Qbh5)QfW7Tb>{^j-KoytFPO{Q>o}H)<70Wtr)#P8ieDWcQt?nelsb4auKFc@|K- zQk`BeQ$OVD&2JUXGVxqN+J|#(_U$t5wRCM3e$&nsq68IbKRoxR#cLfAdiWfx{` 
zeO6W2nSx~RMcG)xv||k!?(M?I0ynlAh-a9^nSgZfLtRF_aGSH%Vta3Tp&#z+>W%LY z!t-kfx(f$Pza%8e{mK+%AZGXc{!VGE3)m`hl_}ktXszoA@&H0&U0~%sqdd?lV|oCw zOwO+^DNoEWWhHSQL=K(R$u)$N;hSfa2Rmg`^@0rO!h(=Q7fN4E5RHn_SDpAM4_cHC()2u6r%Uy8P=#JVdBy6TQ(r%Uri)MUtmw;9V5EX{VzseG}kH#Q`tr#Yk~&`Su4Z7+t}SaP!) z|5C>^7MhA{S<(^YWrW1HbtkqT%m^=cLgU-Y0Rwxoxq`G`kzc?rEc)5On&#{s_QNY( zzVQMo_o<{K$g2t$fMnF{0$%Ne<_q9zv01ri9%@vO_G_rkDp);jy|Ax!;l@H=ao1c! z(yya5eo%@gI=naRGr!)&&qKT9GSYto_1P}9**G%B^F|kMtO-g*KRaKlZ=$LKahOzD zsK(yx+Kq;n9w_H%<}yOOg^2iiZAQ-iCa%_&J=C{4WlVrQzPvLywXo(JT%59&P;axV zF%Kx?yxlpCo?0~CiqB@nCJF`U9VGDWf?ESOxZhD3dZ+6*-mOX*D$3{*-UX>u6;T9b z7WI0CcjLQzhxZT?2MZ6ZE@7^)es}xRgTY#OuUn8XW<9Gj^@hZcMJZYFKFDe6F=zC| z{636@`o|bWZe!_-UU5!nuU)Ad0F!TT5}bsXVYi4cBc{L z#?%5#*2JXI54r^#n=PoBfm4CS zDQ7gBva%Kx^&r*V~JN(qx> zPonJKgPpa0__7ms;tw0Dy0VVI<*%|8Vq27!316WJSBJ8{di>DB!b^ubor81JYvHSI zK*C6btBCDK6-|-9Mlws%N|zg@?(HD*LiFtN%3AokL&r4IcAukwh&5V*eS@$KfYslQ z>AAV~0j0c!Z#sDo{``M+zjJ`h`4-Kgds5m@sfBE@@NMTOyhDDvF5Zv0ITOXi`VLvF zBr0pkwE9?8a}d7kyo5VFYDJl-CEWLjyFPGesIe7R56g=nrjET-*`t~co%R5O&Am3Z z+zQ`!^hEQmHn~`^nzUug5A4bdtG>6ehCNI;*jWlcbbLZ$X8juSxtf4KB9PTpak+{) zcJFZY82$)9c4WdV$fAg@y7)ywRp_4(%@V~@EI0B>uCE5rQMp=0_<_AZs4a7jm) zpAo~C7LQiRYC9%2O&*_Bjz4!$!VNV6U6~ye)P(y5aT|b(>+*0*>zMq}q6>^_vW8c>i^yjcAkA5%5_@G*<-@6$b zo6U$Cx(oGkLjHkBj#gJ^tm@L+9X|Q+N9XQn=H^0T=ZA47Y(ZBB{D}r!53;e{z&Kd8 z=q852vQG-4g+IFyQ_V(H#biqL%L+l}{DtPwtB#3R#cklS#IDj;p|gNcL?@v4j!sJ- z%Qtg%s>U%d^-A9KWgCR=ng6rio#s4?9==%P~>%-3xbJ(r2*P87u1N3({ za?B8v)n7PEce5ZM<$q9FuW)&R+ZL7(M;ZF3GZLzCScZNdE2Al%7)7=EcjaG=RCN;Qt|a6N&Cqr_A9)S>rx`vfqKiNoPId?>-!Ugarabi`1FKIHmU_00wcM{Dtg}xxsY?i7w zdvW@3xR65=o~;Sg-esr>cVXh_#CO%^5R5to`xkoWlK-U8S0~Brt!T?I5;7P$dq*9 zc2FlP11?De_-cju+1nm_tZjQiKK$&^L!B2c<@kh>mg$H-T2t&xlTAO%y2w~e`Y`;T z*@NLSj!f{R?Q}8iz_o(dn+x~myl)QWd@thfxhA`M}4i*2UlqO^9eu*sT?i_PQeF-?q( z{*~O4oz0fGn9T5_dTH5lW!iBg+5zu!b*_t1lXF`5+|~hNkmq!m9Uw|hethNFun({x7qY^x=H?`n{d$MLsww={iD&F@_#I&f zzdblNT*J``Qzpj0cA2Ee*ChEUkQuQoPMF?Sn0k#(%FwgaH{n`t$C=Gfq$;YXQvGrS 
zN#bi5KeN-oz0^i~$YVj4mj@x+mW*e)@va$N*IGhZ$yQ2n+-Ar8Da;q{}$y0++e^s*x$z9&@=kj>9^bA`VLG8P?qf@;q>UE>HnL60PqO`^}j*;Lw|24Vc}Al%5w2^|$z65Nk!bY;Pf zX#xG)!X_K)%rG$94@WsVp>3*8Phc8Ct|i<}h}!^MwFt)L^)WB6y|jF28W(+2Cnt1H zt7^5Syo!)PH={uuQ$$y%HPaomd!3~P>`Qxd$0r1A%SEiLB+4yFVYl2T&DwTLg++K; z?fD+cTezj86XO3aSek`9T3zO}Xb#M&&0vIwGdwXQhM^VZfP~|ml@RvN zRCHCN6vaKBT()o43D}zTvX(G~1{;-}+X=UJa6;2+G9uf-kfzWl5RERGZYYLVd+4Tx z+c<2@2=C}Sn#oB2w$x`gAscncMp{r)qffJTINZ)r2{rNPksH$!^F%T?kcm|qm*%G7 zQrZX33|>Tk_y7hN`p1UbJ3XP3%^7NF43nXUqK%c@@{P-XUZn#~ei z7)L}|Lr(@YaP0Z2IZRK?Q^=${ zl~vdKmRR?r30T#KtPwcX$q9ogHH|>6s`xv}r!%oQck32^R#(^?2@_5{A>sMsy!gyZ zA_D9pfB_`-GKSZ;%91=kK<&)vzPjfbY z*zd4}*T*Dub#@Tv#AkX!GFMEaCQF!c#dc@cE+jCN-|cW02PeEeCM2*O)Tj!7hT*F& z4^t@6Ubo@ZhBMcLHzDEe0F5cSojr{`W~##9mG~p^Y=@&1Sdw#*i8^TT<7j_BoaJ^T zIO*d97Q^bY=WeuzL7OG~3foYkg)Z?R;O%O!$B?p=ZcF*AY-0&immznjA?z7f zW9bN*sQ*px31%6orUuoTKx3fx;mCjou{d6=h1b=;4bIy z#F?bX$Z4Ysfb^+goVu2aufc>&KOUP0|J8;nPJDZ<_sp*pKU}zX6la?Vp8p0%bz6ULh=*GCoqI2)i%q@3nD^lVt zk#h;-@Ck=yCnWe3H3W)CdxhG!gto8k*_vf{gJFQymM!)P*LASnIoz3DMLIYB>ul$r zG5QK!a>R0>2_V@RXd_2Y=6ebowC0gM*v8bKLFl_337uV>u7^7$Bg%l3i-E#=mN`|c zu03WR$cT<~l#uQ@)V(ouF&_fu6$$_O%04YrdD{+iguOGj9ibCC5{Dc+HTu>X)-`ou zel(UKs}9pDc~I4n?Zp)&X|K5v38SCmMz*OY;JF0yWpS4-z|%qoEp9^*?(h8nYrasE%=Bc3eK!^(%N{_>=&57@ z9#+XK({oybgQJ`a(}M$@5FY3jC5-IW#ER-=Wx<1J!A)R6y*Gn?1v(U#s>u*=Zv;=FlxTzFEhp`WUzImvklP)zW!hMEPFvPvtsN|! z_)xd}Q1gSuseFDbv&F=l|A7^{tcUTkG^X8ITn=l>92{VoaMa2VcOw%@f4Ox+%12pD ztVfVVCv-ULYrY#me54Z-Iz(f$$fPIeqX@0!b5GB(`}Jt2CG_=11WlwQ&SS{oI656g zcDxuFLwGCO!-{=OB@8~+sR_x7rlsO)2lc`-;&FDd#c4KjH6HKSgq(p9^L5B1txGZ@FEot^0;p4RBKgMSsp6PatseI3i?^prJ#%I~xvSF%; zlWjHweEfDln?>6X&vvU$ZGKWFrjlU?Ba~&vb7%$=5v^TC63CofZ07eZ4QxJ1cLAeD z^sx6IKo=@5%KcooY`)pDs=h3&eu>JZ0V<=PN23|UjSEa^)1d4lree>9=euDEm2O;K zDi*GKJuzQEW_56qZUhH^lra24rzQmV?AeqbR5nOyk^;YoU^X%Rv$5oD=eX7uOzOe{ zSMwc&e%P6Ov2zpR93~>J9aI#BeF?GjBF)l?%s7i(0VA-}u@NC|uiOC4za}Zrzv&w`;DW2U;b`%2&}! 
zR`cbY70ilUT|#2ntKFD{H^C(IpzI*#z+`$tzJ^Gaj2MkkwqR>|mMiyQcS#41X)n!z z%>=}&5fkloq|t}Al!BPIjS5mJ+xw?Z+A$SB^8R2wtMGa^At6P>RAqx5G$CZz8)(?I zU|2RCHZFMDI;k_*jur6cwrGaT8{L+Kl<1mywRNJRzln7ATD8#binOD{})} zU&g$P#;_TvrZOY8!fs@Ew;Pf0s*g#H&h!L*51|}7ue`u65; z=tiVqWZ3&?7`^Cn3d9WJZ{huJMnZDf=-dZtiS>cvO_*C=Y}t4w+OF(lfAiYj=>={u z5&v7-!kGd5J^{|WTR`?M$MI8OF`uF_9C`8e9)cz^tTD2yEw;j^-G&r@wclj~Kqico6!|kGv(nCYJInfS zOPxb)_8eGOeAeL!F@s!5{QPiOSSEaqCa}@UMmRTN8U?bHxR4Ep5Eb41yc>{^`c^Yi zTB9lU7sx&iY`)uLM(2DtBD8%zKKDg8sttvPhGaeCb9FSC`z4xNihE4YFqiquZpj!w zt~u@@F@e595FORZoOXC@$fgTlbzVY`V)P98s3zdo2xR!zoD6j?F>`yc-0shIt?mB0 zgA+z-O-jYk4r+B}!Z&CFeb-WbwgoFaw6Hu}CvcZg#eh~OwBSZReABJj*IWQxHC7D8 zQJgUhS@tbj#_A!f{fjGv9O%x?Ar$>>HznZ>Ey>87(l^`(7Gqoz7XLfsbL`1leEd5! zYJfU?duMvUv8V64F?VUcFQ%C~Y>;zg+xKYOm4TV=&d;{=3xBaVjb47}hwr;33EqWH z*6pa%672`1-7vcoqjRU?5-2}(_*rSU;XGwei6!HHMB`XHpy|SpJ$kM8SVkx_{MhYC zh%=Yt=w&5QenJYJxpJRV5&NGyX-qw2u8f_pJ8lQrBz)BUg8hsz#&N2z@7b% zAQpb@oRkm|PQv3BX_TV4zaf{;&12?qy4_*4AHQ|t7#Fm;{cu46e@7sTqgKP@zzzap zi~aC>hpoFfmX$>L11Su^*USVTH7pi;F~~y3;uz=@{?QFuX@0IT+Q161JRWwQ@w$pX z@hVuuTcO;V^|28UyT>ZVK0D#hZe@ZCWHP0KKBOu1Ux;Ra$jb%NCi~c@w}XQ5SBH)% zip>QhR}}JZM6#=n_zx5pkdn#-KT z1iBzWy!&)DMmzb)He16h;_m&0+>Qhcy2x`}X^F5%z=4=fGF)s-xc^>uU9G#F2YC`deXU$uUs_>U5z8IY;PI2U2ZX*|HIkcCSmckX>iiAf{ z8=V;~E@Q4pW7s$KWZ$VZtd`K*%QmWY0Fhs&G=?j=9SMzNJ&sib59njwP%>*F`#E2-k3{62j~zp+{l|$STkEguJF9v9bjg#flq5X^JxZ zm*sFRM<%!qg~c(3r9`_nX-DD$?QP9M@Tdyc`gn6KTp5=ux{kvW+N>ePQmrtKBg?K! 
z%UZC^het;axptRkd#hM?2-8LS;d*Z4nWIaJYp!$1wMdec8)zl-%ed|ugQ8q+t#zQ& z!?w3>NWyDlr^olTswLD7h+@p0#hYUTE33U8|J}NKV%s@L&xnl|qun|TH*^EW+zD$k ztD-Wb`sE0c%sZ0iu_R&6L!1)J6^OUkL5$O`VX3ASIvF=|+Y&~}Tv0~((HMPMbYog{ zT|S$*TgY}!bw+YCSAAK+K;HnNZw$_cquiE+s9&ZPpVL@f*>Dru!1mSJCu@>0?NSRx z-qhg81ZU+@bVQ)q(+mxzjmQ9t-7RbE0!xNrDRZ2!wzbxs?lv~i08^V++g%Yyy zdA5l@RL{Ztwub9&=>{bX6UBH0LZ>)AMko>xvZ-aa$0rdWO~17ZJ|N$o==dP&d9+)T z(B!&QETlFkp-xyf+{$biF%fimWg78=aE#lKkdd4)6Ew=of@5jH20R&i%?o3hn=rky zJA>{fZ%R1Mjo67lPiTF|jboOiWytY%D^!{^xnzPJmF=9d!rnSMlvwEF)^0`0h#dta zm6y{|$}-{v8o{B%ELdGl<(^yS#u?}^&4t^zAqlr5lZ&MyF2OxoT9(|Fmar>Sj#{EK za`q|~m<_jc{Frc>)$67qz8%Ccs0j*sBB89P;4{Qk`a5$4-{Y8Yd#8@+H#>DXzK|ot zM7#ru^f|5VQ`3oexjVlW?&!3HAs`dbHQGTzO}IM|$8o^Y0#k$O1w2-p9(!k}CfwQ5 z^jNx%F}b`bDDXzR&SK<)%Z_01HEeQX!gZQJEYXF)N2Vs+X5yF-UW_{_>a@jKC*r>w z-r^BaCkhF(l^7P%d>S6dv(dIrV}^7uY;#ybw>j6)rR9f3Nny7W%Z_8U;fX8foZV;8 z{v>B7Onj{h#hY7+lN8it#>q5;(X7C}S!>TW{_eLK-b2di4(BJlDp5hyIx*o+A&&8z zA)mTtd*fDT5o;5j>NeaB=QTX+Az7I-^zA5^Y}`p3Re_Y2PiC$~%n+V%ixNB?tEps& z9mHzO6@}eJY{~6(mor<11sTfbGKSr5LxRI&XT_?_kH+ZBqDfkGblt<+wO5Y$&b(U% zZzId_+Oaj9=Elwa&mP%$r0m^8d-+Ob3C|PDn8e*f4;NW*^sVMj?W~00q+b$rjs<3GQS5WrZJKYy@Q7kmk^1RW=X;v9!r4p(xHR|__B$t`@~vsdOKReA z6EMXS`NFX~$jL0Yk6MmKu1vU#LlXvCOh&JP9Sms-eFo7ApjLukm|eXzy%f%LZbI8; zeG75zz&aXHP|&*)N~fu48~)B_1!1u?8wNYe`4i0#)#Qe*yH;DK+>NF%307X-;oNO( zJ2)X?QZ7vuDRJ&j4*N6xg~c8p@zhDaj}NvH!qRX{NGHMe18^4YuIXRskIyyjyA7tbP>wG%s**yQDi%Df1tjbIniupE}@&4ALgG5+A?H7 zLl{aeEyAMvt6GEwWy1;gz1%0R`)H#OoHy9}dSPvTS?jc8w3F$Y#yQU}HVd7Xv z+nNp-SV4dB+)kT!Bdj?xp@FI{4^fg5?Of70@SX4IpqqG4$85xZjwojNm0^|-^JFod zJKM;%EGN{xh~hX}{#5f%%q?c%ragnU_+YrV;}de>bCr?Brz|gX?n84prfO9h7ah0S zk$=zHG2GV;NrVs%1-*bQYx-lBx z4i9#7&ezLldcr+~I6f15dYh+b)_RQdVsVxkE^+=)rzV6dOr=yHs*(afj9>Atzw}K>Sr$Bk7BFmJbJ={*O)W+?xtA}->_WGF4i@hU zk930)a*kcLabe#gL#927rm^Pmk+|B?J!TSxN4rg9oCw!h$A}))WzA#EnlWBC=GO+) zj^VLxO+r3^>FH_NL5)kFiOZPB(HIU}S^R~|JDK}1i@J2s!TKo&!{gnMDg1H6yK7or z0%VbWTThTRPoOn=eKZ9opZ78ks~Cz2PjovHI+k%>PJDSr2nzcoV)^oL%0E{w>udRd 
zz52wy9rlKLvV&79a@UD)KjgvzMOopWLcGTPv4|oP^E)%k%LiN7+Xh*|h)zG%$qD{Q zDNa13CEU}9H z43jIB|B>1<y8#o;_rTR;f*+00mL)HsB@DCLSmB7|1GtQcI=s-WNhqFGMHyB7 zvXXfqj0}4b4dcVH2fH&Suq{p>#5CU7LrtRliINMOBJ z4!I+&Gr%6v;gt>=B&OKk(YkESyj!4Z`Mwb)S2-eE&*>-TRcV0rf z68Evmre+LjiS-7u7!&oq(x#V|F-1JQ(Lo6%W_XLVs3OFhh+r~bwRhkhk78V~|+!}NbZ;2@j{K(yTur>r@ zW8s}{KtgkcQzLquq~Px&oGE+N$1{2n)lHfcGdFdJ2e|nGj!&%;uFhGUti9! ztM-03Yk%`y8(E4PC^GH?G_LNs*1mS|VduJ92JnM!;s5Hnjt9!v57F4VR$0>;KI|rK zY`$sM5bH78fwjt%6YwK;cj&Wl126o`9tuj6r#kg_P0?RIo%`EuUOSC5u){~)l<8(u zOgj^nMfP19Pqu!Hwz5G$VosLb6Ek~1?#7(ZY)m1oxF{mRCrDs)r?8tt42FhJIwK{{ z8OKj`p+iz4eTpRdD(13TA35$n@@c0fwB05loE?<(g!~MVynO|Wu+-5YeAZznHs8Jx zYb{bpl+TgEx?uPLmsU~Tx_4`4V)(rC@7l~a+3{s*1WFcuffil@BuzeD*ZJ5_LJeHP@Y=*Vt;d@?D$~4|kXAi-s>dYezH7r1WU*V5&}6CVYh^Y=CS` zVxSQ*FX9wz^21IA(#4h!cHxwM)vZVf1m$Y7BR{McmnmPPDJr_}Ea#%{VZ;+-bAjMtQ=eT{|f$&Xd-M;ybmjWK17#Ed<`W8{_v)QmOnhb`^ zTlr>G-*#X^pUq_ABd(-BxTetGA)4-)z2wEvR~8)muCvF)8?BdPI@!6|Sc!c*tS&3Q zM=RJyWvi#~`q9|8I!xma-*^5Pr_}g0>x7lm#QOnxTw!5Uf;7CJF18W05y>CA1qrT2 zdKz|_BFlb6%Q%``7w22R{Atd9!ZMH?hYCM-vr-%}kMOBjB7W|AB)Ftv@BW!q=;pYxdh_{Y?HKL%PzaW&& zw)GSd#%XIVSNNs#ZioMG@Xm+&imoHfuZXcZEXB=E^R^6?03rZ*+*>hOHvHP*XEwvF zVawjdc)P1#jv&dr-_Sf(OgIS>@LBn9MYx?=s4d({ro#W$4H}c%VJ~McqcWHuR#XN5 z9pP64+=iGYvK$W#ZOl$1j`e#-Cj?qeBo=#a>){ z9@oy*WB%&cgmGw7)LpcLaq=?gZ#0P2dpYXG_||Ga{M`-Mh(Bp~pG-jp*g=-QFF67K zK_K7WQf|BavVS@);UU(y-0q(Jix}38wP{x|EFb>uz?4{BT%kc%H%3$!`5)35{q!Kt z%JLz^aQ^GSG1=D5F&v2rbOa!Je0BTHh6~_-(7blQImHpMF>DAVzeeh?n#RK}?NVh?cyB{v$(1fNl zst7qsP}oZndlO)N2)cGC8`(73VhG_z7Y2gN-JiDGQk!RS)Dut6q!{ywviDt`O zO&0ml2yIz%d0H~gMUE4b+(o{E+w(tlk;@cWc12poPH|jZptWz$PL2qnW#d1XbE2H% za3wb=A*h^5>Gq73l?7L(1+1gYIn<(HSzgywoSRT-47rB|C84fL6#HiwWW@p{J9cxl zHGV+(jGN0}&4~#v7zS8zx<#y9%s-f_=vOEG7|_!)DpAzEJY2(#n#1`G_1VmMPU0*e zW$`s>F}*j|=Lq)CVB=EkW`kGxT5iy7n++;8{6!f-u1yHt;+Sugb!M{8Q9Qbi(-L|_ zB^i~$(l7~VFu#O}~nz55^FI?aG31cfRA{+dOA96`?Z$R!1z-501j~9z$WYcvSVX@EJfFRt^ zO-PIeMm>xnu4i9WZcHH4j-+YyK1<2A3b)_LElC*Aw9|Dj>_CaT9&5ibQ7nnsRE6SF 
zmDsVDKGRW-Oel#ZnOF|u6y0HAUFuE9XR@?)uax;`DW>e}W;um7bt@7Q`%Fv**ue;8 znQ=3kAx|eR&lf{5wg(R)CgJAJPv|RVrwgBb9#Rzd7UZ(Fv4((GtmNWbIxnHN8EOSg zMv#_4u)Q67;FG&2k87=V=X&92ha@yL+1a|Ns**UjA_qUk{sdRMJ;o{Lvp)ebfsQ5U z`Ybe+Tv=Nz0WVUv3-@u(9^)CB|5cTX#ixlOla8lJmx4)g$wLlHVSc^C6P{tYLtc=P z{t48#W@&$S4h<6Xm2jcASr4StEEQ!0xh)}uDC$PIJ7jIq%8vEla4PH2N{OhS*(4ypqbCM`qmkZ8y;;g0{m3^_)I+=+&;G0bvN zVlE5P(K-w0P+|?OJ3Bw6!xfWQVl`AuXvqad-AF1M0q-KUW*6t^{a|=_j*dXsI<;zGPz9CjQ0M}zBE^WNl3>~5tx>-B18Bol4bpl)+kf_Gsm zx}$br$(AOm%i2!xwE?yT%yz4Xu@$?x%wo~0mX<>cCpkR1i*ao9uExpAfRoJtYyKt* z6VQC>#*XzEU`nxCv|>dZ0qNOO28stL7|D84n>axIh7 zSYtO}>B!lGm@tD5%Y-8n-kO>J zWOQ#w3W_^PE=Mu#d2sd&dnj?^fzzCsFidX}rAJ!Q6LJrcj8j(2ye_rhM>f@5m~wDJ zFVci+-=u=7@Oz2h06Z>I!_5Ujw;`dOFg4j=2cx8A$mulX zMld9xSICFVF{nNK%rw=kn}zi{+S)!W?00JtoR6ZUEGm96k|DF`bj5)V`YOuRoZu(r z&T>!VySO0K#dpF|_I=!XNfUfgieQME241>3wn{Zom5rOl=lBU3S zCz#!H3_OpWy?_M_!?YW*7iTmymb2@gxd4Q0nxReXsKpQ>e35LGbyRoQmYWqGX6LUQ z2(xZaLR>Ky(@n__v%N>k+A?I0hTIH>Ai%0Ou(c^HXsg`p{bS>X|I=6Pg|NJec!t}w zzxf`yu1xZyMvAPPr*#_uY4AJyVZm)msH#jYme~=xJef<|mmvpe2t6P36!yf`)74O* zuuq4s8<6nUn2HRrgQ}#!_aL~`?qdjpo4p=%-uY_xqnc1XqS!3ipga>bnD!EbpmFN{ zqGJ=9B@>GGxsv!&P!)cOcy^b{%M4+TJl8B*cJ7#7uk}1fu2~etgj*qw)nL}Q%66}t z>TsP(gk;0nPE7D}YC33DK~V##Or9t&htXX^(>Lg061zha+>O!eVVItn12P$!o9<&3 z?E}*TZ0LdyZGA*T-M0Qcw#Qp_enM`M>%~{QQC-%YLu*(YFZcV?t8-n%HP30`b9=FS z7|Sf;2y5?KlW@q*Nti=e6qH59FGdh#(qWp!l80s~%(mihykZy-!|ye>A)(~Ob49Nc z6YgB%=;>xVHCcErrXy(A;d?qWp)#r2@M&05{`iku?T9!AqR+}T~k>QUWZYsVsIVN7J36QJfDaHHdgyFm## zz$PVQ?4VXxCOm>B+!Q9*eZ{|7vhJA@NFD(Vq^A5Hz#q(g{33YEIqEV5ovm5#? zW-AXkCnA2)f&;+O)$mw1DWN4y-v1&~mpPB4IU8UO?m~3Eck6a!jyG4zMm`m6&WUXg+UUn&gS8d>V6z%@YWm@+Ztj`Q=B7NOQ6yRUG+KEp zfW{T&t)2O41TL~Pnr$&H|LJaLLQRlduIo@T^cggi;kA|S>_N=R-?tP$#BEr^Gu@hv z&37%6P`R#^6Y*Ij9tpL)`1qdfj zj$4~*|JU$bw;{plHwjr_2PHirpGRb#m2<3CdbETpzR##zFbn+oj!tN%tfMNl9dHe? 
zAzhj90-C@W$53#FP3l}{W_6*583n!YLN_9zddtORMt+zDe#+W1QMYEweIVzWy8Ly%l>=mQEf``=F?(278BV>$ptS+s3-TrDfC82<3da|QgTeiH0 zwlIMWRZ`qsYH%*sE@{m{;I$6lh`(rPid{f>Kg#9AdmVY4<6qpLeV}Ct!s{J(hi2S} zt=XfLP;aP11*CT(lkkoI15}g}>PU zxFJPFm9yd(>_62ZWYjxo6zd%8B=3x?p>%w)oOik*33tO(WP}}5B?W#L!3^=R4o6W)kU{s%oXAo}A6=6y zSMZb0{Ga3s=E}O%PtgqCS6h4(U3aX6iLJ5*;nPk|D6`d-M{^x!5@RU}POkDX~6B7RzjD?W!$wu44aSYj<|A4LdIP!sng53xD76KwVB{$B#IftWI3E ze4)vfok;13wtVq_Z;Ok|mM_tkYvVeHy!ORG>{26dI(*s9NO+BNCEb(!aBm!2E(ptn zuh0Yz`^@(6D6sNre|1J5qu19CUv>JJ0j`nxG$T}n{uY81Uw1nu zn+t|1>K@oZ%{q=QFN3~8gV;yhhLN0@PuTayaWfi6*n)h3Z@MK3-fss|b4ql~rkan{ zmKooo8BE43tXdIkyKg%=A@O0vvK4uS_Z@PIbL*#&rarxBoiWA#zw5+=sGoTpY}Ij} zDs$_H)pfPsqZJHo_s{MjMg2JB1tRD_%Q&dID42vL=S02&RKxe(q%pBk8l!etlyW{fjq!*UXWK3x3O{s165h*_jI1erQxq2eN940CdY{EVbdl_xZPOQi zydE1|L3}^*4+UY_@Dtjw0XEdOi0nU})9rT={tG{KGZOlyrY0-wpw!qFq-Ds@Xb3AQ zYZIA-(d#Z87+}k@0ahCLx$_eoj%veL5fkwjByy7A=yeD%s*$gC&=0?K6Hdoz4GmYW z8gGKHeK3Jfadp$Um_ivM(|$$MifIWpz&F53{s_Y&PX_bw-I_7Aw!I`aQW%RxHV#>% zD*SJVr{9p3-}o}L7SG|V(cd~Y;W=aiqSlCs_B+z(S(iL-_VB{*ot5DF)jIH$5#`kV$`|NvsaasVXOV zs>a$$tO$}5Zt4v_G2#lFw$&+h1^y4gOwh@u3~2SsJi$Nw*P#iq z`kEy$D|%ucfu3GD#|!OU52+^?z<$;_jr`Q6)T$ zfW_qQslDMsZpK+Sv!SZTQtkW;jG!XI7o!&IV9Ce}(@0ji*8CNBqqvA0b7HeG)uyi? 
zB+^AmVi4FIjMf(05SX*N=cd>4z1c42;N8t&*Na8nL`SO2nv2sK#f@S}dT(cO1v%Lm z$zPr6!xzVPr{NNAMM7!_s!oQsWkdG4MpNudlFcTeE{g{{YQbrm4wrIx!e~)0Bz}IF zX9s1vGT_oQfFqhd%wM}q0oTNDBZL+(?i!~;gv+>9V~VG%j!GV2wSqk4(AvVPL7#?M zvh=dFloexTrH6bMjmx=32|-L#(Ot5GZ1D(}6!`K4Gl&^ismGunr(?`(5V0Sw;LrqL zWN4lHvY2pJB#!;nk!yaUg3v@~PMo`vNui{oD3|X0o?z|mT6otJivFse>o-51IFyKS4=gfOtjN!g1-h1N z@>=-n#PXNz*1*9a%lf#M+nLa)50&C!L1C{=EaP$GCV|9}q3byRwC4L%l+%+eeo>ud zB7K>2U7B-A-UX~K+zJd-hU+=^xMpr;eM%`vc>|T%a&nNu_i~7CNVvWu5~3-kQI3j| zP&Xiol~Ly89E4*C*D!O8Z|JNso{ROzYTTrfns`UrrR5=UbS-dGIWz1=j!TH&)`i7g zN~{}`#cIaFDO)h>9ee39*BUqOdn1b_1{+apV@|*$N7b3;9yL z+VK3Q&QFLxn40)@FiKj6+>C~>zO$aPb&i?q&|c|bm9U$;1qptL$;bjb7}6B_7DUqz zfxE%7)WXU?+xaiZTvdbLg?ETxG1dKRQ!TH z(=jBOcq~oCPhoZ{{5|wwj&n1{)B_`<>G@n&sR?yFQ8&u22{nikn<_EXLKNfHZa_kP z&f7#u-2*#dH)*84Oge!kY1kF@InU1)z~kcVw{d=gS7ttl_;!$s33gk;SnnQK9`y0U zfW2On{s2=GZ|As;*)1BgpQu&srUh}IT_(-+!tI@UUMqtkO`-2V z^ijBwksheJXBfxV%1ixlM>iy4ifF16T4uCn^1cbF6+~H_Z^LAU47=mE5Y?IRy8i$%_&}eB{vYD3L43<f z8dR=*)DK(Sq%&~h`K^scgJj@V8pv*Rc5U0SB^g%Mj0s??*dz_^`ZhNxp~TkQqZ&=I zx0B76rTiq9mbuY1q7Rs2bCRPII)|eRM?q5LlS$^-YPE$&i{;Kk*x~Smkew;1Xxf1} z4nl|+N^;_}m!SX?*^q%I}jWDj^U5jVy6643JoRE7AnY{_Dt+^gJn!6RwE}3#yls~a51$7(lMk?#o{@NlEq|pqD2zdVZ7H>hbFlZs{TJCJtc9+d zli+z(HDy-y%X*NEy9bS9U`rj6xX;Z&H{dq-kA{a|ZUu7{srRUUU5Je8oAdA4BXQ#H z1fn|ODc$m@E=!hZ34V^yx?Wg@3;*_((fwL>bi&2*WZmJ4iV!PAu!k|Uwlm!RgW+rk zjpJ42DU|UOM)H!d^0moSo2PAKjKpOtizK-2yba z(Bo!ZBRDm{;{94`C9Js_cgLv>?@d)%H=_C_y(~E2mhXmn$~jJko=Zd7(BLmeFO(DK zr1rx--N1ytHk`Kg?D3m1WYxWB6&*oa2Xw6YQSDJs9e$P-`Bwj9>TpH^lV{p=%C7Td!;(5;xlSqc-Lcz&r+15Y`T2AUetk_yhunn^7!< zj2P$>op-*PQHhE6B+@uCM59>p3cFMFxm@JT!cus$QxnRa3F#{Bphi{rrx4F4>*>S( z0ncTAfJD~t)b-H)bm9HTKbWfMPa~b--?4pVy8PkkZbU){uqY=xieC)%#i=iIoHqC+cAhR>llL3@x>t3kmcrf^^p_X}TJTL=;kGq z$6Qc%DnA^lFN0o0gIE)l>naS1p+LUa4M=DMO+a`%$cEskCfrMiV>c#`Ko>H1@-X*H zot)5($+g7J59_35#>;31BXqX&*Y4E;=0R@Z{BM}CIkE)_#zlvLn)F z>j-1RSeg(xh3$Gf7>wXH7#N6uy;Bngeq2acKjJ*YvaI-TAb;5Jz&IPXk$$6tH^Rh* 
zeuBMl;@Uxc`=XqHZz7QM2Sz^(^f%epWyl^&^bEqA-HL>Ch_YMJHalP<&Iy~XMAJ-Wuh>HF$(wP~fc4AyGw7)vbK^b4RZd1bJaTOWkM-@$x-$ybdI_7eC zuRg|5tzRFfTfX0|xC>5ccvXiKWzEpHH5^&?0a~^Jm{RScS42lm6j%8}zjAVQrq`X_F&94Sw%xVaHj~XXb;IntMwBf47%jX5@7#IFOx8k)OS7Hu zaW}f(Z1n#!GubTAwS0otf}bO_sc;MH;bR3wZq@NgH}7`M=EakBD=RvJe2Ne{Y~>cF z=tZE&9%khpKJDDS&D^q{_~oza5HjgAG>PGyYNdv>GS--JIl#}lF$wt^RVA5I{jw-8 zLq11CE(JrZOUax6dFLM2d=rNHl2DNJ7brcPtz`*0Uv$XEX2|G(eCsduHuF#srl0*FVsmAB3;EeF+H%CZU302PHirzeXe* zE04t0M)npDqUk&kzU~%`=~lYZI%0ZJUlx6X7SXZFRvVnu8iwdu^)NZCy~s^Uzv*Ud zZGKQS3m6Vdiu^5-`C8_gkNY(z7J1?MoSSbu{FG++$aKeHVVUq9n!rvj8wF;g+t5^N z7EQ(ggLaF`<@)Y=j4&+~1v{u0mJ#2h5gdW3=>T#vU--V$6Yjr@2=7OEpOTW|{(xNe zX=Vp&D;=y8vSlkKz_q$d?KvzL^FxOwL=-XwA?wt{`w@A?n_ZV?w`(f=*vSb`-0pAb zX4ebLh@a314m_2nSg70t=3D*L;R%;LVwQ!giTE=T`FOLWu+p0OSeqmK+>r?bY&Dr> zRY6gIK`LMC^5bB$+s1-u^9Wk~(zyx6YV;E>NQ(R`Cu75aH!Kc$P~ZuUuU1% ze37mdZ|l$m^rEt?I0Cspa*}-jiXU1TJzN0)9hV|Vs2^NHa6c*vig`g}7N%u!Mgwvy z(Kv?-IWysAn}m>duy;D-dO}{9NT!AR+@ht<0^*|=an_jHsClB2#00u1L9F|-*^P1U zNd^FsqIR(qnhnO*Oph#ziFWa!%fb*9F6$C0pytbxm_U~#h_S57GJ2?ovbmU-a%93& z8@ZOA>xp`4Qt2mH?*Y{f!o$o!Kipo#R#}&EXu^a{6N#m>G6t9_3VT_*SoK~j<6Ky3 zt`{!n+=S3(t|D@NIN^h56czgNcE!UnEHrxiR>BpWoZz9jh`@g2VKGUHdqr|L;7*(K zSaHrX_Ytn>mE3@Y!7Wo0-wsAe%aAM65XN@Bor)^bnV`5=Czk``Lm8d<%EH%ha6%D@I=Tm= zC567GU9!(*&R4LTMW?Vk2-k9QLOk1KB71nvx~M7iwTZsL|D)|H10yT4F6-_tz#zNJ z3Jna-+BnSa7?S{tETNNhl4d&Hp}I3OVcp%`-QC^Y-QC^YzjIE#y3hMnfbU0Ux;lC1 zocCVIty{NlC6!{_4r$cS15HE3if5>jRtLs|OUywOgP#Mk$9K1RK;*$jeMkJdp&BMv zi@As-o;Bp*8cSiqI;q_Vh&$ecGx-iN%82zn1M-hNzX2f&EB-^tkDmih*A9RGFIG}| z?l2>buycKW05KMUL~}T4IAAiAraNj)GNJ~Pq9r&R+^vjh%WTcrFqA+tQ0F{)lJ&n^4ZfndDwsXMJy@oM5O8ePOoDW;)Iam2GO@FWgQpNhG~_IOIB&f*9#(?*CJcIz8wgxy$k;$1?@6+9BSg#y_I^Ufcf2k%Zb8C>sfXoK~& z3s7P@jZ6muX|lT?S+@6cEd|pXeoT7CDLf0e)mS6UXk6=Wdn~5t??(DtgPvwDcsiYs zYKTL4_}rq8NR{yJrsIO)0VI>PfxZ# zVfl39-P-)H161BvsKS+m-IQ=Uz-CYH+TFo`jG$d};PZ4atbxFRbnMY5x(U;BX0x7v zD?Op$gNetHNZjPA#U10$`2QDojKrNuaf;|ft;W7_<#Aoa;hJ+R?kp1#(MMN!^D@Z!*Er=x? 
z(ae(OIM9S8O+VE^?h*PTW_J%pNVL?S-vS?g=qqOjyJ5iaFf-RNB@wo9i9t`N^lb#M zl+RK58YstpR>a~V@mR|~!HJ?&6*GHys_ML{jPQyXhBqwIqJ7GNOUeSJ91bZtFY~s6 z0vBP_1fTN`*nmUVH94o_k_NlV5cROEM8q0=DY}q~ZVPxU1dm`<53z@Jc}WjxbANBw z4V07U%(gpq87ZHcCd%zGS-YEM309J#38bq>U5ah-=Y#oRQ3V9^U<2$NG!+qUgXC)I zNIn+<#o7-~BQ!<)pM-#>N7&&#ZJOMaRB|^k9p^PajQ~47th9zo3YVymOH83TgvN%= z3CV%$$g2EbLtw{u4no!@T7h$C!eLsCt!QYLOb;7Y;8RG`?T1$uOn)WYZ;FuhX_ zxqBITgrl@BRdI?Z(tDGXGZI^##&f~8_E87b{ys(+F(b*w8diXb?Y?AVl$-qlGC(cE zFWcD@ITAR)v&SlBhpuPK~B*dHw~y2qJ>h{eNzBmtpN z1uFrMrvR>c)oc)(8!WZ4GU%RQ5+Zu?h&^Hnt~5N68t`+#5_?icUGiTzVcnCAHKO6c zTdqf~8!tkM=*c9a<&t;+mvnrZKkKYL%~`@zj4SOEzGsr}X5cGU%$dQ`%G40~N8PiGG@?TaFk%gbP}vPr)Xye0eQU+lj9m5@ z^&UKSa>Veg=a`oBabbhuRK%60;+OSg68T(;uAv}0;xb7+9TMD%JBT@80U3yTr%Hbk|)#yE~{ej&I# zFTzXCTPY`f({M?qo+UhyzMZ7(9yy`$UV&`OJZibcbK9JnxD$jipSM6xg>NF{7wqS&jHBw+DFbx{X0aoynC04iHJ+{ zS4&C}NHp&zjoLtkvCT9@?mb3#*#rR-RMhV!H5co%GhAi@#2hmumV4i-$j2p?OF>2c zev;FaQg069H)NXi(R)lP^8w?Ih$||>=!qA|d zmSx??j67nokL@dqXN@lVWtl{xK2A{#*)S(+IO=*6x&WNKYo})~w)b=<^pMQF>m!Mr^}1}J~ZG@f^vX{@G_zE4vh$Ft%T zVrj4suk3Q5&A_&PuiDQu9Ts!H*r+wR^CywWotE zQ(rRfi1~eG9D0Bf(U(a?$6D$y)pmH8USBb$F|V-Ma&!rv=)Ouiu15sdQ<9O5>v_w4 zcoX3IksJJLMt=9^M;yTPghQbQSu(yJ?mA4rG{w6J`K8=9j5s3lAzUuvP%tb}eUns2 z;~uaN27zVtyHDy9eTpG63VW;uqpr@pnc`$;8u>@vw@k_T&5s~OmzJ~`M<%J?rc@4( z`NcL2qByX?DfQKNOjE=%RRSYHwUA=|E}0qFoLP+N_ux?Yp7BMPJvGZP1QN^l$-*u* zvoe(VNMixsG&jO9fYUHw1^vL-BL*ejW@1w6jx)%ak0lZ*`yrLloshYI+R=#haz8Q= z5f!rL5hPgR`Z2lK;&b(N|H7a`eoh%2s&YF7_?D-6PUSC5K*R&C-W?FIME6V5@nsgXy&;q7uZ%0g z#tPT#CPN{lj!V*({+eWLv&ow51VR&S`@Nr${l+*W+HA$rZW&Uy`CBqG4ya_Hc%vz2 zoczu-+yhr!HU?CgM!J4aU98_B(V%h*s&D^;sfc*f;c`9wP%tb}{gG6;rOg@ap?UaB5k>iDQqtQQo;u7xI4xTH+OE%Yi~eG)bhgDlXa*2f1B&#oB*jl| z^-WLoZ$@`gGhOB48(~E9careVvNJ|jQ#YS^~Sb+6saeXQPcgCfH15K*QC zlkO^}W*07L=u!frub2_=5_DAx(qs~0A>7~CQep)N=^<&IyP7G8X!^WT5MDwkWdX%` zb#k)vPF5h{4xPm$3$Fnurd-1qBgW|jKztQoV!I~U=z2k(FD#Suc%w;uQbxXO8C}E; zrU+t8iz%QeuT4t!I2gdew{<6%D*Xpv&#z;w5%qkiYr6yQyf-BKAV~3Em%KC#GG=e) z28p{M5vV^0xu+jP^ 
zMjlc7>MOeeiuI;sWhhM;inCio*w-l~?;!H=^(r(96?@Op z0G_IrKh#dDmxYp zmn3Uq$a&&!Yq}z-!&1P`yr-g_Sg|8NVLiVRcK zYe}so^Ues{wy|K#IzVZaLr{v9OsKMNotYd4i-CG#4-zGAH1de%Xh34Ok|w|YjM)TM{F}(nwPMy$t}Ww#Hdx$yAz%Fl zq~yd@i1}SNo1Tben*f}kN>%tSf=nv6P^G%b?%YgEu|aq@rr`y;H0ZUZ7NW~yQJoOfu*`V)E zH;NM|0ecw69HLgvh$IkU? zlyPokJ|=F6rLH@}WJGMlA%C=wW2q}8XHp4WS-AzSnL%%PvG2|@_6Rq2_41ZS5*6!@ znpv*z%tYjG9qt&6b`0Q0JllvPZrkAW{H;`o#y49D%PLTc(dF+jWwdS<#2S*xo>jFQgD7yp!Ee-^kl7qb=`k- zQ>Gxo2rH~uR3eJ`0y1Z30Kr-@Yu&YtIKlvcU?n=$U57#mQ`FO>rVodwuQfoMT4x=ISjl5iO7iKeoQdUm+9%K`340IvKxj=(Y3CquFU$h{6%gg`}Z5 z73Lwu*$rtmUx$}^YPyHmyP?}-0?uj{5P*Hvs+r(aut;8y^04-$UKE{JXBv4ZTiBKB z7EMFMDrc?>&BBPJPZC9X^jZv^=AJEN=pT(RLJo{3;#pTE;PN=)St1WxPIb6>d!M_= z*kY#7G%fX1>+r){{xg{!e~XrH zmW?gK)lw0{pF)XgZ(zb9>Ak(3)&%~W!_Eiz0r#zjDY;zFu>>Wi{bV{C_rytnHv`HM z>!?%+TJO)c;o}NvJKTy1Ij{Z!2MaDbTD^)&LNBIJc8|dizlf#!U@z>z%Tz^l zj{!>0Clo4iCE?zbAR7}&^Rb-)QK$DYy4Bm50YupM4cN0?Y=%wt)!y{={xf zRi->*^kljR81V(oZKXf@qv_>8+ow(4XyY=CgoC%JOUHZgGt0mQoY5u zwT}^mr>Z@~$Rh&93{REGN+o+Tpi9j|sfO#~=~LY7{oQ&z$!8pyqb=?aGd+RgD;wtBF-N<{8E?cO8*E|Sb|`F0aD^wz$CHvKL~gb>H<-b} z;ZHEWhAwjkDU}As1;l+wVz69N~w~J{okR{=%lt3T-cEqWi z<eddXwl_9=Edw5 zraSH#CMII47GR_#6nq3~22<3}4AjMnYg@|QvsObLU@9AfVT$_Mq^8xG+w4Z)@NPE_f{xzKS@FHW1a1B;5rC4HnG1=%!DW=b0pPrmm_7dZasHEdk zXQY_oekr-H0d7PyVfD%zjW08@h}m7iF`2>$|K-GIP0XHpz;xB_6-E-_g{wP1fk@(c zC3(uRYkQS_n1_ zQ!(bbH&1Z|CZaczh`o6!#IHNt)W%VrefK8gJhl1GQyAav6eAG>s>t6=^1}ccd@5DW zn!yr12SDGmk8#o@=#ukR%E_+4X6&MA_cr5S*Zdg9jx4YPrbYO664KJZ zH}eeF^vio;SHoOy8S6IhFy39wyaC8B3?K!k9zgiE0$w`aNgWLHz>a-)*d?s@v+wXF^3tlh$eN`z$e+y{&dj5h`vr^Ji`Q-HSW!V@4VAo&ts*UMS=NMf!1)a%HV5qhYeH#`R_Q z31g24`!F!kTPc$)F#|0fpQH{hFl7_mF|psLj5o&mLYCo`#CfWZ9t}7m`7}xBNlUwD zEQZW>6=B!Mw$Qni^{AnrG0KQZO$w!ZO^X%C67kt^r{=h2rhE))pEKG^x0IH;Z9h*c zF4WtSKbl}Ee1Q9z+!u^|%nG8x#77PgT(|v2YN5lYZd*3J^Z1?ifx9o6oYQehL-R63 zU)yRnm1ps#=*v`eWbWzAaD9(!U9F89!zW&s7&JT&#(l-4ol`HZ=}AQpN#j?kkz=t{ z7Kbrqkeo&IHIp)CS3$EGlz|lK*Gb9|!}fP*lSg+r@EK^_H)42$5ojQKJ66g=iuar0 zX7gPxj6=*J+_#K$O!Ek&^*ydf6W_PV$G$J@q{;8i87?CUm}AfT9ixuu5CWLL-O6n_ 
zhAbK1rHng3M(CzPly4IB3!h7pJcS-qCcs*)JH{!CDhYq3gw#ccGchoM`kT>3OwaPO zUA!)M`UE5VzZ3sfxWzE>w&^&>h;05Rwq^GZV;*Bzg?3tk)H5%ATSX*o|D-k=f2B({ zEc1X#xFM233`d3xmV2{X$OFe%E@0zc-URwS*+D%acok*Yb1{dDhxBK-3}fKA?GA!N|s#+@X@f zX&NAOvzc-VDb_2Jm0e-!Phe3kcKh%EyDJ&>R{UW@-OJ!qQyZ@MuS|ZP1g~KNA`+_$PUMZi(s51dpvN;fNP~-&$s4Ce z1I(Id-L*_XM8_Y1Bq9_lP(^=j($mq&9(&~4{b7$4>^eqx3VzpA!A7&RZ4%dN4oDo= zB?mj$2Ae?3t4X*APExp@X^1qHR%?Ug`6TuFl*+n@E%&}N#&JzP-dNTD2Bs-uSuSkb zuf`JJ4avvFhG1-NPb_;ng1B*F=x$_`5vy_mM(;ipN|>U)F{wEu=ZvG|{3CjUX)6u4 z!1K)$mfTHD#%^5GP$LXdI;=#*Is$3BDK%x?|J!_?9e?~WdLcX&cQaEG(HLyttw^?$ zMHT(cNzduixFgVXR$sR>>uzB>CYzr~7Euo+`$h4TwLl?Jx1=b>gJfSPtiiBg=I=t0 zf9^n&G{$uohNOU3sw>st5_1s6&~2J~r^((hwI{w~cd+S*=rIkdTv>|1(y)daI6$Um z^`uYgfz|b7h3*g&aA~adBCO~SC4F`_g0v z+0Al7U{LD0+nAKgs+%9nC#gqJDh$-` zMu8-L$**SK-UuUn5*0&A7EFwHAS2h1E4(zxT1)B!I?~u9){q0BZZ{M%KA*Hp(SyV|lni4pw&kXmr@RLM(`Tqv{+%d)+@d}8c2T%`|6s(%zVtiOU<3AMuTe_Zl~EFy8Fe>B0?F5?#?D7qJD%hJ3W+8 zK+IhMROELd`Ehs@VZ8I|umqQPR33%l8Ah;{t= zv+nMu``gvt1SncHNjq;Qf7nTD!l z@#GT|`;lWakGyban8c~(*IU9>>nMF&%O;g)QYDvthr?cLVq%;B>%^USmdS~5QWL=U zPh})gaj8?CBc2o^dRnLMIAb#LFOQwiYBqp3PvWS^SMPkA%nAk1|Y)OmPp4!d(J2T60owZlP zR?%f-!NfLAHrhQ!!ygr9O6%P-Mj5e45>Qm>h-63LIhqTY-s8mSiW;3_dxy;CbSa7fu+ zDq8~z%)5PFYX&JIxhtyJXfQ%Lz#$UA^k;`|pDBtMR1HmvtW<+7CHtv_OCmn~F|sJj zZN+nJlms0K32}VAiHNA^Re6-lm?FQJdl`SkqJN5@r;rxY zPbsuy+?z7!(X{a1^;M@tEh{PEeN0P)lf(ce#Y*+?(sN(xVZE)GG$OO~G^T5XdFlO3 zRK!|QK$WIY$jnPCDJ1Ov6h;@k1s%7l!i!ONFlVMsK&?af08@7kuB~ZR23ZsF9YG|G z52Qw|P85%3;)=%<&S7#7GCeV#V1v=~s?PNZRFOZJ-Y%0UYP zqoMex%w#hI#0dq5Fs2Chs2@w_yx&WknjU9-m(UeN6y4k7$-tFgQu%PfH6E};3k{CFmDW&+yRLr2pB_ze#g&$7uo?=>#ZPvmIB_M+!=%*68eyW=Uu_%8D<}Ge8 z7`dkzafDUA>Zxu8ql7-4Lg_oLPPprjSM&qtGfZGa@PNVT@mOhR_H305xU@WzTG%;- zpd^fGa#1_1dNPT8Aw{w`OwGh(9amiq zc6`FY(ep=ZzsSTz*nlaXWTnMAc*%J&u-Ok}Sm8CUnJ(snR^rZ#}R&S)d-<%*bZ>;w04*63fhaD#T4}$NzKt>1_>d%9he7L2Ii-`A>j~b zFz}XjXI9*sj6Y)TnPN#nTC9SWj5kvTcO%=j%PylDrrcYMcZ~7gyyMcNiR!JS$~ZNX zedgSa?01^K%_t)}|9Wqa0gCnQWTh!%6THefBX2q}(cC*s$CMNB+~Xy 
zYNPWbH|dzb;e{P07~>3N=-y>&B3!)&r1x(nBIRMl|8DZL`4yWdg45GHyzg0!fPhrl z>a(yC@Lmd#^KW6fzo)fP|Du%+xfk_)#v9SI)#O=9utfHLlCi~=PUxE*uv>sNBi;`f zZ$yi0I;k%QEg2uA3|eS&*nW-k12OyJF@oWMJjAr%(B|G0d-r7waUU`@5ivReQcph= z#^OuThl3;-mUG?tWKZAj_P$&hE{IUQ!ce&=c~ZVqyIgMi{ZuGx9KEOg@(( z#r#P!ufZ*b^&rg!eK;L5&d;^6s~0CYBBu3ICLp3(1~e%Mg*s>n`80*7F9f$klx_=oJok7 z$K{|TBlk@aTsl5a9e!j+Z-&VMI)?n|$NmV4FPM;sXTjYvz>l96w0eh~pg?4eLV6z!Ku%V3wnv2Wfb>%L;75j}D~Z#N!ca)g}6 z65Urxr}k}!jNk^jb=`ftYk-Z~5CNrb`?i)rH zVR!@#Jc7*%!qBl5gCP zOjU%Fx?=gZ7FjxeOdV{>#VV?8Ebb>p9uZy@AS$c8@I?7jQkGXAhH!e}w7j1g<2tGu ze^g6Ya1diC_|*pwC&HhTknOtAUBNcz%8&bnQAW&H>-wt%OoYEAA>Hwz6HRIuCv3i7 z{*_Tim*)f@kNY<0YuL{6vC$61W~kqB&}-m zKnGB5-AVfiHW`kvSa5$b&ItQ6v#n(k6gebu{h3_s$8$5b%<9wTiG9^mbC;d4S%Hunmu?ZKB}cOTRv&gK4qXKEC3c zt&M{&;f068ht*}MDuT>+zoHeoMY z7*79Z!Xg$8qKDHOHpx8zJ_6NEn>*NtA3i=ELjqR8Fl@OM5zLd1}25PD=*vJY8g zskl5aMaH zX#v*1wZu8y$RTN0rL?MrAUG+)j*D&GA$D7!S8C79ba3Lq)lA#@c-#$UOVkoWJ()ya zog!}yuG|599D9=G;RRW>Q^9L`4HI})v%n$JM8UmQcg$ZciE2=)Bx@Ovz;P z!^z`GPyVYa3Q4*yCD9Pgo2rcXQTFWXnT#=$A@nGmDI66rBp?*L?Hd3^dj0SmauuZz zr{F7#yMYOqXnqb2m86nEif%|nG?|vV%$G5V1Sd?l0oNv~hP#nzh_J>zf*wH?nCNay zx;3B+c68#>fT^+>>y=7(;;r(V7IHkuH{dk}e#1Fu|}jX^qaeQ?_xPMu?xD|fJIng>KfS3Z4-<8<<@(!GYdIejax z$jPk6k>{91(+)&@Ac88~AtrR9S!fEWH)>|TzxW&$3CPM)nO(jV)ewJ z5ry~WZg1@Oz*P-xrD4FRr;)ZhP#Y(|Jf&uYeI7#_%h8j=BIs+Y&5@=k#;7nT|2C`` zhVTrsM6@V^m9b!fBq1F~8F@q(9w0n*23@zM009BX_L)06-w%1{U+QwdvAj-wRXMRY1pj8>Q# ze*vLmQ+_VsjyL{@HlA}#m%&7MLLk(Vfd0Xo?j!Xl4E)~EooIw(q8frBR<*2yU~C&^HkAd2=B(&pke5MjdH>UT7{F?Sf@cNdzd z?nEkDb@|<%Gu@>HY=FMA@kO{sYPNfND3RTTWcaaDV|hFc)-kY(+IM#~t_Y`F-k*y| z*9?-V){%;X@^+k;zJ9Z!0$S@gC$lVsb7LP&uiZ&3Xt&r1dv>}R6rE%X{6=!6_XL1v$}}E#&u_q z>7KET9UKNZwY)?N#%(qFh}lBkf878r9e1M+djEp*AQm%MB(wC#V7R-Rh=}JOkSd=+ z7FGhbQ2>4pNRCwrC$qGdVg774-iR)F%(*NC6Wb25@v+lY#T+i97KYc4O&1sz` zL$cdx%n<{80Mi|ZLJhKH?4k?~@aaus%pJLDq#SiC?sOwxhrelPVAZ#sffM6yGSa?I z?OWPoyk&I^+S)c@yb-?aDl4}HDeg&fbKo_ToF7`42+h$5cV`%FObZK!ysuuNltUEj znPfd2tb8F1r`)u2`&Op+V6b>w?|Y#=+yXZsBb&}L4G~ey0ny%f6TAemmRGJwa;v&K 
z+XSqG=7x7395E6Q3SmW(a3Va1g!E07C(!L7MzYZ){nUOn=Uk(Y*wbQo;O&W8ou5T@W~Sobsm5k_r*5PvA-@I-k&DYN?LkjT_jzOhbIZE6F-BDJ0HV7Kg#w~zr%4+e@9fP`G1&y+FfOD=oLGS)c@gOm zXH3X>d==vtzC%}*U`kK;u8~R#XQ`0=EwAM&ML8W7sxP)ZbWB%7e;dFgC=_atC1Z{< zIOC*x^t0G0Qnxh(h8A44^CqVq_n18BCD@80K8akQNH&lpnm7qbX3R_v(QaLnw60lF zeL!*oC&mlOco5L4Hop2i7T74V;`SJI#8Pj-(#r`2%*?`?MzU&8V2S8aL~il`bCp3m zi^h7XCJ!D>RDDv>Q#gaPAV~lz*3d6nMtJ2C!WxlLz&Ileoh*v(Hv2^uR^&@0=P1ZJ zXyN_n{Q4aR<-3cFI>Idz8Mw?xk1P%Q zgN8~E5^QQYkXB4X#1l`E^vKg<#(|UoOTxvJz~RX97zD!%chW|l>w6h{#9&*M!yu0* z(tDE>KZVrtb6p2R?>}bm@bJOc?t)#s2f)n3;$;TOU(YvVxD(ktOILU6I z69n#O3L?Vpije-X^Gu^9I~B^OhAHw$+WjewO}%KEaQcfW)dNgRL{kqK(hv&4*71NM zeIQ90F;d8l+goxEGQK-EKmBmI_(CDcv)l6r`aPIbM*s@ldVPpKtZQcO(5|V6n1YC> zKeM_h{S1#D#6zj>FsKWm=Gm-91Bc^Q7>jz*)B<-|Jj{f}*u9Q6OmZ}41iIuroN^dL z6z0XjOQ}N#mTPANB%7AUmonk7^-8E@y!BG2ziWYjaXdQly@I7^%UbM zB?CF%gTMh9fOjLPX?JdxS)1uH{k1|9CzY`ajd`r3qzUE!y=HFo=hgLBP)a! zUefRkutJddXxvkbFya54pYV_!5X^f7+Hj&Qk4ZOk0;XSlJxe_&c~FWH8Z8DpFhKQ zs)UxOB~km&Ga-@BpTKM;Xui7rRgg*L^QkiIE~rxpvr|2Pe-w1Sz;s^F{C0w-Se+$0 zDSaW8vZt<&Pr^kAv-5k$Wct3yBu1>}<}vjo^Iv65;2e#ly_nKyh^3v|WO96krui>1 zDG?6O6hku7Vir=gFC{JgPV)@jYcu1y(&N0$=+@zvHH|p+J0T$(f|VFFlI+#W!-?wU zq~ZiGSOS}p9mlFy^iG%wef9Kqq_GFcLYKp=0 zn90A&*drcuKoNT=6d^_YY7+A?Tg*TBFuQq;(M7Dx<(EwINMdLgL-#fl5D~(__D?to2n8m4s>GI* zw^IrYq`c1`UF=26Xy?j(^bXSyk^Mg4NJA+2g@i1q$lpnFcBTbC4jhGiknUYZye@fI zryO+@a%&D-ODm%VEUyjR{86EgsrTh1aH4!SDe2G2OG6wa=5RA`WlSy39`D{`#1T%r zfK+kEc~}W}F9py;Y8}yOVZdSjgkjPO-TO>Jj5{akA0=TtutdC{BG~ZKA;+@LATs=- zW#q1RA20Q1P#r_9$(@FXx_c3FSc;o>}?4gj^SCQgM z!pAA$7)YRv=KYW5&7~7K_lgd^%{t);ABT)A{e;Q7C$6utGD16y-dj=0C8?jJ)T@F@ zgD297x=)#c>#2enVoQF>h|`2UO#kS zF&&rG^EL)ua=uDA*GS$h=9OJ!({Nui+K8G{ez6IR@V`!cc?}91e-JtZuiiI|DPry# z`lHIuNqD0CCMjw1W&%f+_RYF)8COJgt%(=`jyS$e4$l13p~;`<{v9KUu&cwxx}i|u z1RtAzMDtzJa8SvI?Q^;~t+Sxr_lz&XoU4fXO14&}h~oV|dFesRFSEkwfCX3g1LGXC z=omWB`c+nsCcYn%4?hJy%nK2M@gt+V3;v>^udb&PnAm$Kb=uA&T{vWTo$e!*tn+$BY$r3_2XVT*nJb|8 zbN@00JDRJ{%=k6UBDi$?n>y(4$h{;bVwmkrFVDOGn1YDukAalmdkQJu|B{#UO*c1l 
z(%QAH_5O&DZLob>@ab^?nR5R#?g%$S0wm@!pkhA&W3%60UwBWPo+Fn4wv8YnYUD15 zzw_JcBc|MAD?lDtA}&u6n(=Cna?5WjVzeyl{{YJ%S1|gBUMjg<_n&+&A&U2kP zo6zzd;cz$1@a9&gl5xOY$;cxXjB_5xT1?Sjne?o2I2?|{0~0?^IVQTDULK8b=HFF} zIKp13I$qUDpGuNgBD*%pIKY>7TXLRT+Ru3% zqo0VNw|ua==_m$?x-Lc0e-RgYfO&V4z<%hiX98!N1zz%~gAAn56IbNmdTuL_0JrMlgOnZ4s2%8-N?8jTup`~aw}EfO2Lh(fHAru z!Az@-j$Xlp= zeiRJ3Ry=NxOF$I!;bdkXoX*)c`<*5na6_ty1j`zt63aHc~8G@%Ngj*$1;nw-GSN;1xe@u!vASQ zf$8o8>yI>5WBPHNz&IKl6;SlRLLvNLhL)5TrLgTM-htgXz0J+GHujhbgies7j6b67 zSGX98fMPwGtTeeo-@hrDJ}kEPyJL(o!mAembkCuXK@;6t($P>4e}EaB{B78K-s#VW z2+CuPIAYumP&wjCTuC^N5*S#Tn=8=fo6%tbNYi}0(MPzTtLF(>Jkg#&TDpfbE@-7H zKhele!EYNnwHhBZfy8nWS@46!=GN}cd}sg8=?f7{j1v+O66sDhstDI15cEW=ki>Ng zx#;A`J;*TWa(6Vc2z$NyAqFhb-6_zSSKP8OblQD)XQPXFX8}St7z*37c%r-uDH%)_ zny&9fblA*r+hE?z1suUO+u7y12$O~{@UA9hy7`TkprxtwEq0>S5=!ejYSp6O#cNv| z&RM&Kxsf$7TyJttYL+vNtEGS+vM?gqK$4?@;*U1Z9I=&tQAjO?aN|=qPr8jJqSGwG zpI^?V0AD}67G6KP;GaY~GH7$(I zCi9GDnOR6Zv+NhO9Fnyq$g=Ys=`vVc9w9|&4-Sj{GYD|+;*^+q%>0+!sU|BTP{|;r z$x7o0r0Fzj;#i2B6*b92=_rsnxYYzjxVuw4DN2j>39lHo{#1aMoV!sD-D#;+>azi% zYo!sXpvb$Mj)-|im04PX6!$iAvug|O5N!{;Enyn=xE{RSSR?wH0HJ#ig&dwJcaXAj zRwZA(Hk$7=-iW&$H>)y8QSTx(BM~r3koD_MH=^V5b;FoccJb3*7xHc*GiG>%*?5cp zAUnbmPg`zn?(gloL7xWcnQeFKG7@LCmiu_OZo|7|j=r>d5qr6x{K~bBH)0hg;OS;Vp|pZ+;1V-UF^51* zFwetOpp<3G&6t*m@Xr7zEum10Eh)24YJcH z#wXdx7qZgK8(T~Vo=X9Qp0Ns2ybI*z9!3hp#^la4{Ld|%oIG@0V~nU@x%wG*kE9mfi9hs4Ikub*v4ME# zmW?{1lkkAL-3V-n*-J4TB#M2?oEt1AIj;AahKR-6fK%B~rJy3;Px6C6K38z~()GKv zJY4Ym1l)=V7~_HpVOA-YB&5akQwl8^7gGioRcg|T`K)61G7%BAzaqY@m{lxI4SR3$ z(;4Nrq#^7LgAGSi=7PD8(MI?as`j*{Si0H!l57pgik4d9)Ajl4rUrZ5{fs$ctW44L z)YGCj1B>92a(_y>Eu@6$LF#AFI(lgTi|sv~UEP^Iuv#8qn$E{{>=?%E8Ysa^TIt(~ z8Zv2oAT`pzkQ-w#sdNuADG~mKTrX@x3}8g^V3Mo>NiZ@_-El@P3U4|2g?osRMzn;0 zrrQpMI%o-bD222jgx>DDIP&!)#Iz4bFh1;9g@>7}b8vCPYl#jbFB3`R!zuE1iO6yk zU3MjfJIXjKbB{2Av&{l|^B#9~ODI~r36zrhNJ?eEAC~<`%cCyx!_L@J=f;i65gjdL z_(asiOvgRSWR6*FH>_10eiIm^>d{nnEvbT!lzp;}_j-(xN7#h$%L;P{ zqI{BZUfN{^po#9uq~k-jC24O=>R(M!hwdrH8gZxf_Gt!E)K4WfYj!SRK%rqipWAlk 
zKiSspCwmT`W-`Xq>@b8IrhnpChmx0L5{{Hw2B~^FRUJM?)v1^{qEGPqAfI8HBEp&D zG-dIn=$TZMtr4Nhq6xTXnSe`LBQj{Bdp7Af=i}k!_~Aquwv@8UJ;&H0+IQ~o^E{A< zo=c*evFeq#c`)*P;N0_!Z!`X`VNeJ#dgGyhy9--KJf^6hPiiivArUGjdpz@WWpR4Y zlP{HST4cgNU-|;0kBIm)D9=9*SQ=hP4K!V>Gj7eb`!g$37-k1cqt+G{PW7MtSMEio z=gj)YVQ@8ldjpBIy_nh*+g=EX9P(_XY4;Kn65(R2H0MLD^ME3KDM@dG8%qbz>_D9T z?Cpw|nTmG(;|Lu>3VI1fWbsMl%PEqTww!tmo5*^dxsiK?$%=4@WnoltvS0X=ZV9gF zUrBoQ1=)DX#53n!WyBG!vnp0H0TR=z$wV*IJ~$VVBA|5^{=a)?Wm8uPmJrb2!jO+& zVEeoAMocgRoNhK0YOy8dwUlDU2CX9*8?Q6kh}*3&HUf}1UQZ4VMyY>yVg{LhdY#$C z!<*0k^9CcI#6=A~Q?09a9ED`Pk+SI3sLw%xy4hZIZ!$d*15qAKZzBIy*m8`1XG-`I z^Ja=+-M7Gc`+)T8J@%MyF&Q!TSlH$w1%_oG5>Ro!mE5e3g*E4_QtW}ku6*}4(-2V| z1Dqs;Lb9G+#+H<~Qwrz${+ao{sFgWPS;3p{SdEGtQs#YBV@t(5sY0vPNL#dlsf&=E zN^39gGUkX`WoaECz=-7CBq`?PL8Wl-F^-e*AR4NFO->%*h~&K_q2n-2tGD?j!|rf% zpJVgv7FYuBGr|bhNdVH54223*(Z8ScbOC4A?GadrwT=%MX~bPtuKx>1G#?}l4ecuH zAIaDk2D9!%#uw3g0*-Dn6tYta4=VBxlbnyZP{Xmi0^V_Sgp1f<@eyN>c*Fsys@-H! zMgLLKvl>}iWE>5}Jslr2$}!e!nx6(eLwgZ%2t6OxP(Tya$4SMNoQk08nSnNPAX*8I zGaOIw31g4&P6a&Oa42N9(-*)c=93h248)jCRylX~32Ir@Z5`iz%CwE~QWBnMuL*Ib)2NZdSP~ zOOT@eJgM0u+Ec518Q3S!9XC^n-N-yISU1q4_<{)=6NzK!t9U2%oKpTpD(ARMZ|**a z<8mhP;VgVWI4Yd|?IrgmlNQl)7>w$&m5QL^{xZ4w!n_l+)QL6e4cd3!nceN@*qmw~poA*{ zKcs*=K!9}!mEBF~W|A)EN2cg(Tv}754RIA+OrA#ye@um(@_<{0W^1SVxm`%>f_{az zzgLxn(*4A=Mfl$gQxD5ZV;Q9Cr&L8lpEnQHx!BhQ!wO4yQwXwT0u&l& zzcRK6k4wN1WhmqUMfz)!;wL3d5=Aj*hWm|CuEU?y)WxJ;9Tiyelo>b?{+5J|F{J~h zI=9$**M18iV)YOk}bIV+QW%uoW4FB&Mo)c6^uJ#j%skaS1Z+l>&~x8 zEeAnM>iAs0gn<{y!gxX%`t^QqfD~(2G9eLR1GNs%Drm{LGG%bRHR(Zo5Ue72LM8wc z5V4Q4q8}L`By#LFY6-2rf6RG9z~& zlDg|t9j6b$vqw{PcL&}GY)T8`pi9a;U1AwMh`WJFyhpRdfb2Vqpka8d_&A zgv0z>e(Bz%4~y2^W~G(c0{Z*ZVdpIJ|RN#?2<5@|b#+IZ{c@Wp12DR;2(k9n!h z&SPSUW({drqeCyeDOvo^#@*xKr{#`2#Hb_srU2(FRDxNHEh&dm3cc8web||g%l$q5 z!M;AGhci8Qm}!Yv`J)DJ$NBka1~l_Ai!VKgQx83mwl-%U_927LEKZBNmC1;ivox&y zC7|NIHMz4^Fk6SVytvyK=OwX%4M#LbkcL+9nBImnwLV+~N&{fr5nXK_v#C?d&`8?t zDD6l-{jni{vz-yfhN)h=KflZq4sUM?$Bb77xe8xWfk+zfK#i=i`CAR0^^{J|fh{>i 
z!#&c3P2z%vJ}Zwm=Fw#-B&kJ7T+y&@gC@X~JId(CnDfp3hQLI0G>JGGSffzRx@81l z&&_cFJjNI!Rx}L6_ZOk1rC}r$C{M$o1cBu zsbuwJ5_ud&a;rjVvYii_*7lI&O_tN#FSx|AGLT=8mEYg$94YXVkSh@$b#of`UBW}4W z&a(h0+B=b!?>{#k&BS1??=9k7G#^fRXHyWdIobgA-mO#!@yx(W&t0eo?t{`!jai(F z>F#O*BJ8$tJ2Z$UzIEi|W5Qjp35a;=Su}~re!;E#MR zS4N#7G8f?xrJW||vKlPw$s}?YMKY*st~bDOq;UU02a?e^ce+U#Gua6f%KQp+5lc+F z$#fu?!oZ`Lf{n>2)LL?jutFEygwaO45`)q^u@d77iont^Ne%28Gb`cAF?}#Gk;&hk zVe}E*am7-24o#eAl9Q9XYI)vWH3Zh9L*pPH}p$ zKN}9`+D3S3g6s;YqMs%`4catNVH=il?Yy2DV~%)nDTeBETFgRcvV~Qu6VW*4RW0E4gMHxK3j7)h3!-K-( zeHM^_FFif#IR<*tG*c5k8&A~vzH*@!wmZS(p~Zqek;;rgaFk6cT=4*v-8DwizT z6-lMJPtDf^C#)ijyIAhT2RAS|5#2+f3(DY#VTlZUOBKPReyJN<)e&~dG>#V;Z^Y0O z@brp8ArqNe0GAj?G3+dcI4@_mrGJ;1(go{6GYG92nt*lqqo(&D3{PJPC(02i4+k2J zH9XriOQ|-!2Gh+ijk#qL5Mg?yhzRUrUL>JQQ2MKcpn^>D_EH{u@8GTtArw0MtKXT2 zt?u@js)&v(07*wERG^A}Kj}G5R)@#4==FiI$~bs@#HtC#jU=QiDP+(@cYo5+(U6Zh zU~n=LWj-T-&+>RZSyIPFn#X-FGN(W=(T1gDUz*lAe>N-1J{w;?c#* zH25gvjtI*(6p>rWK6p$qKbp+!s%ovEp?1zqWA)cP#zaKe8?h!(iZ4Nr4Ua0{4xcXh$mA7 zTj#=HG{jE}p$>YY*!bX{V(bw;ZvYT$C}hw?_f*m`5LZR*=SxDJ-=}O^Is)$nCS!NA48iItL=#BY3#lt>&TdKq53oL_Lyh($ z&iF;9Bw}<57OcdCLItYmUrc(wicqgNw5L~mdIdIo_Yz}{SZWFwxwer96zNMznp=nE z@Wq8NUlHBQj5WdqVnd(5VIXO6%|nX!<>X~MFejk7JLO44c!bg%XnpS$rXivo7@)5K z!8={nAZ>r6QaQFZ1urfOZYsfWdR;mM+me*4Y=L?~2Wv7bn4Mrbv^8vxv+(HTu&SypVMiQ#pGR=XqI^b~o z0p`z0fHFH;hHs^X3gI{QzsdL~njd~P;MLdMW(G-mGbLRQ1lHrucK2e(FFNnHn1C^z zce4>6@I?34@C1DSz^*#;iNh)0{@lnXgL#_?h!{~*7~Owb%zVn@OVHaXh;^XdottUt zAKI_7i-+zVrXXSsNEiF+SAiwAcan`}nKw;!Qm*ZddzX<#ST6~H=qkX(_HMG#!;jNI zms%t2nsDzijIgWhx`!1EwQl=nX(#M>$(kKo$K5Nq-RNLtm@|(R5Ud)A4#R z)!-s?A2J0IOM3xJ3PPb8SRy`55!dE3SZKriX`^4|(BnR0q$l8)4R4&+`CHC`h~T3n z;9$WlQxpB}+#DJ&x7m6F*nP~1BC2mLQ&kyEgdZp28W7sTe<9EW3qn4Kj$lDGS(A@<}S;l+K67`5SJXhJFH%prZpd+^0-K#OB%@j&$U{$stR@r>TJ5RDRV3 z`-Jd{-DiwDVnSEH?BdbH_gV6>MTOulGhbA#H#7UY`<&56w5R~2yA6d3RMCH)^lVX? z$x!AaBh1mJM(zuyAmYU@M$Nebwsd@vI%MA#hIAe%+s8>RUort3p|+uQjWrSyMA3ek zv|RPdLcl;@2thV6YV(Ik4@rK_~7TrY*v}DuSsJ7r!hpNX|DYhe02q2j2oiIMjL? 
z#Xdv!w@l2|<_8s^Bq9_laV6o~lt2q_3A-kCaW5jgrJ5VN?-*^wVjC|PWeG<#-z5!u zj3mVbhpOb`g~$W0>=o{NMi}80tjJqY1uFLMlN~?B{t@d^Tvhym@vg()G&IvO9i)d7 z;}6Nm9=Ukri|ni)8DWIsSbx?FvBdUcveiwSlD3*G{r|+MFOz9g4qRG(N-cEu*_5H{ zh6~*}Mtc9u$j5jYn&%7#D8iqU(C-)=?CD~I{&b7g-PbOjfP&DiS@#R0j zv{(j~gkOex&d!vFe!JeUj5xx2lM(nP113*`6ATfH^v%aZ>0C9 z7MvCW^HHrU_-JRJ>c;J)!ow1LZ8-_rS1Vhx-c;fs$IoE(Q^l4dZ zH*Ap60Vsbk_K4`ofR^hn>!2m%j}(HRlJVoiX8&aD5%(S}9kGW(7y#0U**}vq*QT(^ zy^Zb$ZR#(^cd6Qx0qP$ANpE@%fec9Q_%Blw;g17q#Lm<4jMDpW>SZqvQ(~?=zsskEnP|_UugiSF5$!)F zFv_LPtFuO54zG$pdj3m2bXQnUkH}UW^x55uYVto*Fvj$4?&?!SQ67L!N1Z^G3ojXq zd^5Zp{?g|Nh_L+)O?Adf8TVioTS_iZCCz)ln5qG(MtaM$9cC_bS1?(Z@gA@Oku+YB z8fkP*cK5eN-Tg>TkIg}|{K%xn2)n?F_?1l2m=~4y)%(4bHwIDES0*((kkoMoMs93z zS24m<@cRa9tIBJZ0Ey?S*+X45-W)_B-R?J2Qsk$aral{CN$~K?F6Ge9|QxMUEhwCLE z6jCseU7KWwfh-KBXZZcFVIa$m&et*aoz1U50E#^n#-K~ibt#7?l7Dka=tnDTvq1IX zDQ)h0s}Yii%G8S*bP2hBcrO0xWOICrqX|0cmUk54uwadHH!wM8G(VR-U`EbZ4vD%U zMd9ZFtip1N*_v#-t^K(HmgQJY#>VEz z!F?Dkk-J(wBUFS3WBbOEd?pK15ItjBuD^yv=va|I*436=2Ibs zXwdFpBRD?dapaDp3m8IPL*%k2)dx7aLyYB8c~U)^_zops)`hq!aV7Fd8LSJq!;CUw zc&>CI77#^yIB5^XlVX#fSzN+$fa8D30tylx^^m-JFkFU$TbY6gk3qoG>kEZ?aEZA! 
z#jtZoTq{d$Bol1y(!XK3^EM_Trgi$PaURFF(d;)lWGOg;3K+ZRv)67J^ys?h!fc2e zyshy^^ope%xMe`m-j1}_Wqn<2BOf9ENuq3TZ*&o^H-9z!wucpQ#Bv9+&$#n+ zvwsO0Y-Zh&Mi}8@Oo5UPF!)`(zaWDv0WAvP+LO%_V1&XhJIXl6*dfhR1c8a@XcEyB zvqhZpl2B&*u{*{DM3`cRse884SO%$DOI4T+S?G;F3~p*pwaguBA|m_+m610&JW(D; z%7Z|eypW7zRJ-Ye&2hYON56G%{*T9%h7+j4mX~#W`-w)k7TOxdtitj#0fc)JaoK2i z3qrMBarWIi@Y)5si@a zLjTS#FD}vE!vUCgHAN9)8YWI0N0P;bYDCelBQ4!In6@VevyC}g2fH>9LFPjt*PEVu z);}Rbtx3!h$0em3sFbGCQg;d2O(#3pHpRd=c`rAbn25Cj58x|t8B1iFNT$&h3-g8^r(ri7}cV+#ApZ``TIAK|PCaJuVIsKu6) z(5kUfy2@sVxBRo;kF`?-fatFZIj@=YnchUjxv%H;cj1dET zCi_4MNnGcU3qN@m<*>cDw68mhpt*C6E#hU>_$dudjOUS&b;AzZz^x_Yo4cpcMKp)f z48z-i36{vtCmGv*CNw1T5tDOFo2QI4VpWvyF1t#QLCbs;uoPTC1q=kt{5JD3xYXg4 zrUR&LQX&kfI4OAsX_^jCrRc2UNvFwoZpNfUL@os&)$CB1Onhn?RMF3po~BVSW4_@#}`pBGaUDDB@Tk z2YzxJRI!Q{HmG)uXjE z*pCbA9PXx&Pr#qVvuIrFnz@R{`<39t+9xY*CN3KKwl>60DdbRU;hUuK?b5>Ez}O>f zrs(sTg)J#dl)~YZfpN^RIMm*zNqw3}cH5!PyLgf5h*)h;v3#8!3oRLrGBkT$;B1dO zun+(?>Lj!0p|MAF(p9}?7EOdB61I5XF-iO;;E7*az(xhPY)Yn^YeNy+KaEupO6*>W z#ZO@iHHMXomIvsJ_L-;%!#-@omY7iRyQl-8NcWSJMqPFf9?w3)q1-D*7~$xx+0N&o zM0GK#*b4J|wjqaKXjkpMj4@&lQT@j445XOvO=fN+P3P*HI&*EjBJTxA!foI}o95vh zaG~ADgq(@X8oJUHRZlxDjwg|{`%)SQk&6A6TqSV#GbIs&NC1(JP}r632PL9t?@wA< zFZm#2q!2}7s0SG3T_SGYdt?2b7f$#9nAjdjw(RV22Yk|#uH7GEs>~ITmdzd?WZV%w zA>Omhagm2YEw-dQm{N|yZEYINt~iF|^Qa?Pkb-$|-@BHlu^WHutb3S=yhpRh0GWiz=plyhsNP4NpB zNVenPHSQ56;*@3)HQfe*MDs|}u%hOZW_gSEQN|QeQR~xXrB?2v$;L>BTwaFh!R(5A zjB!Pn^x<+n(@;>32V&{Q9!n~Yg+(*7J7jFhxf4 zpI~eelQB%+IeLpM`7FOJk;L;v@~}@UN}&afanj4q;23m}y@mmrGv$)x4NkKZK4 z-TRX|M~q?6JK%G1PchzznB)}2Gux45c~DV5mDFoM&Gy3t0EI*V+^RfUVWWDQNtnc+ zHw-LAGzlqwnWO_S3JH2T1sw_oxP@eC7I%;9%PH>NcF!XJpdht7yj8<+ zY3!b1up=Hem-UA31tuiIQJDftNm{JMm!KC?&>9HJ&SKFAWn*@HYSb5*nwaTEGP{nVOQI_$v6_hqm~$^N;+X!g>Wi_?SN zq&{=~2O%5WDv1RT=U!&q5wBnv5XEk#G5AvSaw@tFYh`lsND_^rEs97Bk{oT)nGHMo zUSXml95w+|nnIz0L9$*+SxT#u1V(JK0EwlQsrK~Hy~=b%OaLnVkVQxlzna9kRcr5| zd5zJHF(8^34Fo2l*OG_}G{x-ENkgPX|2iX$7=;3ko^>dMl+-Dx$X`!#MLrC>9HDn3 z(?*{w}Jv+moBHNtoc 
z5Mm959G>p=?WE)wn#P4JboM7Zf8Sw*5i|9w5EFwE*E`9@L5<;X*}N-Qu^0}}zcCcn zy~{`=LivTHTO5Ze_IHz=16p-S73};DHQ|L+GXM)ab7L-Sns{B{gB$D-h%Hm53-C$Tw>Mt)&BhYZB*M{Zs z0h4oS&o#@!O27w$fMAg(Ng#8X$K8ikBOoB@NrXZbtOR_R0=O!I;dOb`?X|XQm_@W# zGT!1uNB0rqkMREoAn}Jn1*+&jN_v_XW{jMcL`Z(jI3t>4xLTZ{0N1F3Ek*Qk5-}92 z_(U-EPae*2!^&iPo|7D;djEvc@5V(9Z6aV+KJsb;iTWf(ajI;oe7N7M#ZBs;GX626 zD*9H3eL+XrD%#?())GlrS}wGMJ)G? zT5ue4GPujnOCS-6YxNuV4O1B5Jq>XF1vX(z$~P(H8ju2yO30q)zGb8*H9wYoGt9sU z|J%f;kz=tv_zPBIZvFX=@kDehl^NzzFtL4?Y+UyhB6NOSDoAwv_ykO1yEGW~S0<)XZ?=Q#=pd zk4#0x)O>78sv@*R{FoxV(;^iy>(^uy!#{j|<IQ}kWB~+>wepY`3NtLGL8_oI?}e&&ijo5xKKGd^FZd2Ofi8822Tw5jEIS z@=Ge=Y$mtAkDC^B7~ijqKBC>##LfgT@%=jR@yrLN4By-B>|+cA(z9dv={Lp~(e!f1 zMwGBb_gm6wTAXJ7WT!dgxvt1tHFUo-#)#=@FwyjOLm>&8>jf8{DE~-GS}_+6alZ83CU)*9?2P)8k*>pEHH={b(S6pyiSy6lHm%df(?v`o zoy%ywe_0J$|@hU4&6s+If}&iRkYn;_}<618|Wql@s4CMdeiIz;haoxJ!#8yR-@YvP%Hnq6LU*D%@$KU#XJnA2kV378_j zCW(&*u`;0Gg!2i1kV_~QcBt}VUCR{B;PQt0JdNy6{1YE6;g^L2MJ%KAUYmLu ztYy11T2-fHCyoQ12(HMC>zLYe>(v@&&5n;4CaJtGRnl0pnJz!@7%F9Oxt@uM2+}eL zy-O=)0mXTJa?%5t_W9Yp2yKPJ@JDjSz8<>O$_fKuNUr!)OH>9LBkc7Nd{mzJ< z@=1N79XC6L<0Dq4I!sC7Ze((H;LjT>Q;McXk`}AMCFRDHa`W1!)15^q=}k;T#NZJ1 zOb9Fbo04AhhTP#HZ0P-gyO|Nk%o{v{Zap6)=h4J>bMmpr%t!Ujbw;qAZ?PJ>03g0l z@G*iy6WuLI$4G+g#HyIcxQ&>VnUOot*duCQ7EcPYUsMxF&_NU=S5_fWBN~7uUq9HC zM0kCJ16?XY0nQ`EmSs>yzlQX5bifM}8$bq6%d+wmXvyLb)7Aq_LxaGj@X#WaQnq`& zq|~~BLwN)E$wudF;ikM~HPIbr`XYwGx_LrYmyOj~1-26bQCw#;a z&8xwJ)Wa;8 z+&g(VF&<4ujtjx(yUl0qne9w3&%0w*Ll_WrpP`V26X9ACa+kn(MFi^01#ybkeE&& z6Ib4*FJ9Z)aL!tFHncn3iAEFQi{{nhDZ+^4B$DuPhFTUz1f5xzeCbX$!id%qkVF~^ zRj?9p3I*V&LV!;va!2Ei=%@o-4Yv?QdneMerGz>1_GDVjK6dEtY@`uhs{kR=P{`ql z@-C$0qefN1wl|)=Fr?_aLsbGg=tTp2jnG)|;F&a8*N>k%CH1TJ$rd8V(8DKw)fFg@(^a z7`M?xM3|QWKoUYB=|+eqx=o~`Qy__#KBLbAiqU~A>jRdW5NzIYn~gZ4-dBWPSHnud z7Ajx`^k;ARe@E@%9)CXLsm2^}%Qbt?3{8Zmkx;&tnby{gEo=g!x$U+Z(Q(Z$xQm=G z%-VwgQwX8ojp!T)^20n6O~B~5yOBiPP5mhE(Zsine8+;XxG!mGFzB_4U+&VE?In!= zD@doc-Q>(S-*XX551{yE1gm84plrGlu`w|@S=^_a+G&zbXqJ@O)tChl!!9x$%mr{-I&SaeLG z#574Jx-A!%F_poq=f5}ygaND#^Y11&b 
zwq-kUYA256#0_p%tCh5|w5!anY%67EW@ct)#=p!=fB!Nw!+*}1H}~3zje|S?8dgR*1 z7~vldSbCD7Pz@{*7g7X=C{lz1evp>nQkf+9xBN-M1jRsv=z06*5F1=9*EDwdVj#QpF6=z3bYDc> zcZ6=XZuGJ>M{#3O6OTwsJ8qAO?c#e4?`#U21Z#-G-lr%=30ZHt4#c>4~tt zvyhUMy->>`S<93~$Gfi#&ZE;$Y_3RYjR^4hHeB`{cd@C8aDS#a(v%j{PelSra+E}G zw>ev;80gEyDmWOKvy+hpqwL}f_0^xz0ooP!0YEn&3K=xfJ%Dtyt!JFsba%z{qwFHvWh?OaK#mDAXWJ#zVs0hX9-zc;EK4JA>7I3q9n|c&M>Qgp8Gf zaPnXxd>9G2He8%uTKYm-3Eqv;#*nkY~KLt5_wb-O6(&jmYrnR zr&?)GZNl)-;$G!80>5yW9`<7{uewK?xQJb$36?ZX#+8OgQ3Jay^c-5pDy2nOw44i% zHXRY8d%%;BP^br&n8#2IJ1ZMqrVAS+y9@nA*lz>(*tKXeIEk@R9k{eSj#};lEo|q^ zsE|Z|w>ImHHH>A$Ma~9bO!9-j>K<>hF2q+G+EEc%>WUX9(MjkNC^TzjFZ6Nl1g!)2 zL{oDqtZai3%ah2$u@qB=Oj19tj<_cqO++AO@_pTF8B1JGA=iz;l?w}+Ucu0fL_k~> zd8#o+to|5^IIYA`qYS2)pGM|Gz#N_e{?GLSJm*Xj`6Qrzu7`o@>84~tP)QPsa%ezJ zAal>ViAnmNL4ADIxm*#9198tZArTKDd|e7cL3t;LC8}qUifeGi-p>13@kYC68)Zbi zaS=w;#fye53D2Pfb_cb8hCE|!<&}J{Nr^D@i*S>r1P)1h9wl)&&s#4`d(lL=#q;^5 zV*~!E;r*|-TLPXqUqDWJJk5;l`_q_TXk-(tm1Y~(qlxN8;r4=mA-EJ^t?+Ei7aQXQ zt2wmxfT8LQg*>20UqaHd;~PFW*62%(a>7jp%K94hXySVr`8e?|wga;frs3k=-Wf0~ z3A~u9vnO!i_L87Xd^$f zd#&+DPh$d{>Y;jKm0^&e*HI7~ZO(5D(U@e9-Rn(5gk4+bGETrm_Xg5&>B;(ld2V%j zqw!4W^keEenZUk@H0<8e6D&Da(P^M#oZf7F5ieua8J1Zz-Pl{mNE3AD;a9UckoH@e`n(pnUB4Sz^v6?AV@xO!oT+9rC zQoA}%;R-%K!8?sOVq6Ijy7y4X;feBHq`WOC!`yXaXJ&OVcoO`f6z#qJQJ?F$?=}Gu zdjtZY1cbr_bjf)S<EV$LW)MuW5*wD+d&52!$e~h~GzIR(&ZvTkrDw zjdeZ#reR2*80}3sF@AuI^g)DCF~oMzP5eRQi+INZfbKFBGH9av5b4-k=PbScLI?Ag z4;x!VPZho|wos7SC@_(Igk(p6EY!ITUG-a7r(kr>58)L1sHs_KuImy(?oqFSR=Ph% z-C>OkH3`qM`?$$DrddvTjm)D6{S!pz_~n~&+Nk*n(;ys9hPXE6ME)Nmjd;^iINf|& zOh4t&CFYY9!x>@GQmJ2qXVhZ-Aqr&I{p~ z;`jNRs-v49SK+9U03!U)5}(6hZf#cK-aJ6Uea?s?UeTK6Swj=!=gG(ggvxr^z?cho z9N=Q&Q0%^7{P&8u(Sl>DzWNI^681$3%S0heZP=)gD6AlV$yA&ep~9yc^5p(=3MHm5 zlZhj6=*BiBtE9-M-fLgH(ix0bmfTm2^uJvt&B98+S1I655U_7FkMoh{g2x9&H++V{ z-bSyJi3R?e$vLaJ4h)w8KvwW}`~(t-`#Qy)01m8y7sJWx&XVIMMZad<_KC8;VQPoX zY7x0ipG0Fsc`Unbv!F^OkBiBl<#ayVB-2Vxj2ckJw28C zgnT>?1G&Cq3MMSc7}y#%2pNTReV4ju=oB20TAI^NrSF-L2=|tu=-slCcWxS{n7>bE 
z{Fudu)e7d_N{*#MP5~pn)NufLMA80$v@)8Ta~#`p{uS zYK7HsqWpQF4BdRNRvmJeV{H9}QBH6l1bKK%o}Bccm#p*MeDiy=oYaP~w6_E^;t!@F!namJRb`Z})^JJTAE}Tb zB$N6Fx%es7{mJB=f$v;aU4r3D(w`}buOY%xcVT-~5|SEhQui0r5Ya+2p{*5I;`=N4 z=ydT3T4moPAN^5*c$GN+`J3@Zbv;ZS>wBHaxYF=wV=el#uR*drOT}a?q4P#Vr|^8#BU|8GL?{};@?yukB?=H z_RIL)MJ99kkC8_V#c3pf?;NTSMS1{gTu;8n%{7;V#a#}6=Q9;VJoyMmSAr`Im!}5q zF3WEv#l)sF7;q5aCc7(`fCx`p0MgBef*oR!qKf{Cr02^vGn$>9Y3X<3RaNSQD;e{8 z{87UT?=i$%4JXPglk!lYsaYnGsLVTVs{ou~gcS!=L}IqPtC*IEfWIQRR245aFiP%K zDR&*@VkAwIu1&<`z(sx-`;HbZ(5stLUe%Zc-v% z))YXMlor!ZhAa`+pa?Y@G?|U~;hBD!dEhmTJ!0}w3S`NHiSSw^q(h)Cbvh?^Nt&vW zJILrGo`WIk-mSDDpPD_3FGbg;BDRWRSmaWFKk6b0y=1Gnj;V>r&6h=!pzH-kT^8Xb z=DHNa=aKe&IDldgjAnG6cr@2D2@$U16hi{iVir=g*C#C=qqfu9G#M>jF5 zBk@lS-66lFI?O@{{H6rQPvyx{G<4j}j3{Qfw?m{vw`4`G@A%`Y2rBBElX_xISa!Xa zxlu>h%GPtYFhMiTcV6sgy{v-k%{t#oLW#X4#j;`N^E$X*cQ50@+^tMh#6nR0$wvWC zoa@NR^@04*$v_`>YvYUP*uwX9^P%9!VS-OPBlQp3Ye&fM5_*;+#25&LeZ|`87 z5ssdUwWSJ7aUVkN15wHHa*z6#^xPedioTJC_qgC3CxCG8L|nQnN^@bsCyBXDLfw-$Nn`yaWx8gE2Ht=!R_f{Cz2LUtSG@vC$na-2+@ z1IVu9M($7(G9e<&FxTKkEHX*u-KdfaOBK7__=V02Lj4^ZfIG~DMYtORoSsZ5;K_td zdUO&im@0RQ~iQgT$V@ySaEo6YIbt~1wOV6>?L+4he-sL)E z4|AOHPnbqFM~EcONFa*xcv8}blFdhW&)({$G}|FJE$!*80(#_5FcA^vX@FE6gaY#& z*5XUjiIj9_ND2pyaAOFjRsHS;yaHi7V87>+m$i?)+hOh`6Sf=QZ0HmNu!My|1dHU| zo$|OCL9Y{R6L zutatW$z<}`E_$-@AR@QbxQ=VSw~V(j@mdBP@tjH?_SDtpY@6M+yQdLG^wiBZtw6=T zjqEg6Mx({n^z^oAZQA9U*mAGuVB(M6cH@q*rvc-uwGL0Tr;+w3&`$C&Pa`l(r_E`y zv8mTxO!_Eyx=D(7^P?A`YWXC22PN}4ZNdKY-tM8>)5<3H!}2>k^=Fu{2&Z6Fay;Qm zz)lLF@euY3&&&_nS|D`OMtdy&y`hsX?iDr|vFsuX9p6NO)uwHU8LK924nNZ<|C?=z zWnhUoiz4t-GNSz)a>|Hu?zfMKGJyFhe2O&;e4!Bo%RSkr*}(dbQ4?JKKmOjCVuv zuS7V3ELnICm|{MM%$)S`jo`V5I*bq}o=tH-4-3e7VB8VCK)?}oD3n1(ejdr01PQt>smi&4bp^%^w<8{*rHtv4|v|3&;bneQxkV>JXlD z)21oZHo6J6SfHyNG%|>yy^ypAkrpvp-gsy;uGDpm?%3wLE6n&b^aw^AGvuHHyZS67 z%w{-pu;ay&HBP`7Az>aIizC-H4d*p$FmTnem2$@})ly04EOp)nOsO9?{lqEvVrQne z=yzfFOj?9xo}x%wT1-FX;U#B|a@g{^3oC;?tttK1cs_6B5gtSTZ4noNM6*B|nr)@z zRm#=NjCpvs`$iezAkQF)H&foBv{)HrX}E|Q6h4U)Kl~%iyxhxf<6{IR1qEp-vT#;@*DzpvGKrop 
zIwrH><<6kD*xI4rw7j`Yfs8zEWc;T`)U`kB+hh4k&t55^OUal@Xna>iF1QurkLb21 zMJF74sLqMln8Zp1B ziEs`7PcBbpl-~W+OG_!=h*tWuZq)=u%o4)aD;gxQM0E+NI3+2X6+9gE-bNM?I}!jy z77D>g0h;LULpqx4xji*}y^{?(BX?gTjdU#3g%6`bE@$(}_$kQtZ9DuCU7bM}&e}HSEbif^Dq^k`p!Bdqfq8UFxRUS)O5l6O zyD@9LkIXD?ys-z1rsp1M^btc$fDwHt$QaLJiuzIE?n582DGBl(50QpsK4fKjw9!Z0 zeSi{uC{*G~!ec0bFMHl8urk=^I?L{{#(bil(Rg{nlz<3gt%4HS<4BhGvoN+vewN1@ z)ur;Y1RU`^fjrsx1O+F;Xn=Qt`QD#sTrsO7h;Ptsg{&QUFcCh9gtWA67MRZa@C+6^ zaM-u!7l$*jJfCdj5%z-N`HC&iDhudR^AxI~Kf0^)%lTUQ5VReb=yg}og^d@IgMZrS z(VuF9rkfvf3MoZtaRP%xJ&mGhmdk&Sw~8m>>2YBEX9uEI@O0A>(bNq|Qmj;gD+SM> zg1X)i>#fQ)znF9qexutKccAzB;9u?RKZHXE5f~FbjFSz_KzF;I+P3(N=Ev= z(zqikJp~baTFhaJ_*Eq4_?NCty3s;^ZjAbVwQ)`eVydn0GN1@wLqd)OxwbHcm@ur; z(!e=wINmyDDeI$D7@u9bp`hUIYn9%luYcQ?w zLj4y|Mg9ho%dze~-dlF!0;5!1J;jeXy zpYOJ@dsI8V@ZjHUyb+$a9GKX1SIY3x@)l}gaCIS(0cPzsX|GSG^e`EJ{_tsWDHraql#`h#7JK z5n(745Jmeg(y~779bgx$R_|5@?%hT@;Z+XJ*C51dr7WO0-xE0VvvT-ZIo)*cHO}?W z)zEtDr{xKp7~e-m&dq?&@a$rLinGMvdUWqM=7^3ufzrKBgq4C1P{CEH0FFAvUw+WY zxT4r_n`I{|F@*dfBGcOGF3q;4@H0dx&(f^>u;E8oJN&NhBo88jkC1>%h{4-%Izs+> zZWFfBcHyjTDV*@5#um}f1t8x}9B0diw<=IY|1r{Q22={`r{~3e+?dxl*W<*ne!_|J z6Qtxz4+9EAi(gn8uPkE?{eO%tqH9gQuG=g@iRqJM;#~`Y=mML3!4Hn%uj4EQ`*( zEU37@Np8NijKH7fE|qX@I+CfurR~0D%n_dDTo``^nkc_bN-p7~c8`xD(r?9NjBsqK z`;IY3^utvKP!*~ae3uI7Ub7QUxcsEx?xAzvGu{YeDF0Zox}8B1*Z0ZArm59y@4)z9 zV>kl;&@f&WR=tHFPUiulJQrrB5AE{MI;SeE>Wv98D8G}QH^U4nuW z<4?)RNrVj%g;9b21opZ=Gwz7xYy+D#-XzG9@pH;-OlswR=JUs9M}FT4+&$0tk3*gV-?biXng>zidvSOCPs1)KB7nN&Gz} za(Ylba<jVW1CqFY=Z#H#O1z@RK1Dj<2J;)4Ixe#*v_yE)()9-ll*W5o$=7r5N zOTf7&7eObT|D;Yn%iOF&he9ehI^ww8zf4xdv#g(3$e~(_CR$Hr-ihQ^-MzjZaabPkxc9+8!{0{htHKG8j#}En)43c$u z%HlW^+8BL%-eYDt`gLZjaUUMz7)(v>NR0V%2X7l8-8+5TSiyH_z06Xw7X{$xWGZi~5rO za({`Z0=MiFMj{#wOeEwEH1>#P97FKll(cA{GSHH7HOin}6yidY?FeCb;7nYH(p}wj zMA$_E%s+zsMu#G@WL$$X?g|;{GU%q>Tn7y#w;HO`cd!7A-~wz59Jyi|Hpmi7bI6U7M0P zp9ns!f*XrpO1f=#9TODcNeOt86AJa<5_4UOIS^vtk8HV%R`9>;8E1qSusop`h_J6u zY_?N&K8!6XoY_XxkD(#%2F4iCP6L?kG8Af%CF6#aL94ZkpgRo(rC3eiM0isYvL7tD#u4jr 
zGvkXGJ108Ey9;8wd0-3O1^XU`p`+EhTNqnJZ3Y0{T_|MGM0ZQlF=`qqI5@-Ear%xU zTJ%!7TNzh`_aOPUsESD9Sr>S0Fu^HZ9j;8?+IS{BvF4E^ffLznNJd{!F+d6XBt}fF ze_P|6FnWbqLjcjug~FyJlqx|K?ZKpFS6H$bx8oonw`!)S&K13#X*e5SX?QbAh!Rt} zTEQb_x2G~L7o_PnFy_xHp+>9I#fBqldOwUs|=}C+B2_s7eU1IJ=F&ykUQq}JD)z{o%rslln zN15WSZJV#CB<*lYqcs~k4ZItg4&eZG_|1FmWV6T-CM#kCT)x&G#SY$~ zk9J2H?SIk16G=QrlZS73aCq)Ya_V!2*1-xdjJ9KpF=BojaD4w#cAO@lB0rYobe$H5 zIe%L9amE|5?=(P&Hx$CKDLhdgPfET7bsnVI9t~3-e|Un?N6c>Xa1xQf6s8JoWPwT; zBhhn9;**u!_~^y#N<^7#x~So(lO zMLgZZs3Sb)0ut{8NO7+xH`fWxsOS#4)fjcY!5Akz@@A_d;E8S{=@?m-Z57xapHJ&M zWy}!)3jwLr*ov?cu!#b=?4GwoVXh5k;Zbp$je5dO2eYQ$7!7#h+(OR!MN&@56EDul zooxKOAhN-v$+zkW4LwUCNvBW}d{)-4qQ$|#zKEc+wvPzdYCN08 zP^mh)dm4Yld}~r02rqj;mW*xT{<8t9NgiXL*LS<|N3_8#iXKDuLKar!r;!{#X5g9b-`w;E2+G6axJHu$#H@CiuoKkr>G43SeK|r$( zrjJ(*-OIFbO_+!^cP}1IRJ%wOR{kA>mzLd`#zF(O;eEWwJN*&#p+#ZJ-jNs>A_=ATJgQ|6LNLy^ zt4~xOx${k3L=O_c^!A2A4YFihKpFI*IIj4#c5dcxhfN!plh3-gaYsBOk09nMFwtE| zy3*Q5lIEzbb52~xNF!{+`n8W7sK{qXehBWhxbU0Hb&0q+%%#R|xQKo9CgonuEhCGn zTF74D`f?qO#LZG%Hqr^R*S3z@bv;vYT(gQ&q?6%@W{xykS1v4M4`$uG@kBiBiObjn zzmau;WW1-P*`r$gS?KMb?aw2G)%A_*xaND(R~bW4!qGilL>jhXOS{8fhFgajbiBuy zBJQdxhffAfbc>{;@mC%l8Q#S60q4eU$!H@MCbCF118)i~5rb3&tQLkQLgvy=zlKG{ zI+|x_A|m3AXrBWq;jc#(|1$Y$3|42ZftAw1nd@5fa~GS62uDOz0-G$nk z96qv5=X8Hg6w|@+bSq9e-98f$;djY`mD*tix}@x<6vk%+^L4w=9+u>zNKQF%t0p9( zHUgAHghC~*BwP||gFOo($anSN;lo}JL@Ug}xq{RYNEys?*Y9m2A{;CUm^3s1OT~St zg0qp-2f9ls;c?u~hQ&V2h5H(P#L$+a_!eQ=<@2DTz8|SMt|}$(5M!4e(uoEM7ZdU( z+~3F}rUEIBe+10*TL~-r2auj!VtHG^>~IRXp$9y1=7A<4q6HX|_^o8y6;fO&cn}qo z$F;!_?yUzKZ^Q^NVO%4a$R0wnq9;vM4>hX)!jon^4#>J+k6jCAy`LrE=|&$% zPL3_f%ru>i`UF4i<<+ixyb(wABon8&DX7?=K=!MH9X%fUEuWM7iN8*?w7|8#y#1HBA%fksL@y{gD1wPkg>)3ECkflAAYFT`&82sF)b`& z`(ATRgcAESiq&4Iumyk1%0MYTWC8fC{Ui5u(-t$11t^KKo&Ht8((nvwpmzl}puB0D zlK*Q9se7hz$2_CttNsyH;feBDq~rjS_XP&07tW*YV2S-tyYBY;N^4AUkhJ--{R54_X6_W zq2f(A6}>gnBKVGX+!^gb3=yI?UT6}|Wu;6WOQK$Z8Yanm5#_NFgw^bAev_r|p%TB1 zd$DPXFk=Fe9)BoQ!AihOD1b(UIr#>|0ar}m=XT5O0g9xy4#PabeOe_e!IVc+XNak*7ub1j8Z)my}mg 
z3g5HT#`T9V!IW{YHui}AB?S@mD)^i zq99tw^bq?UQ;VHdu1dYx=p)RCoPRkBqq}|!Nm$*qRxO#5cakW(se7xDMO1e{(S3%3 zaw%mXMf^4r(;~zG(c6y$Tl8DW>K5QhcW*b^_4uQvHl=Tiv>Hy7?;s_94nU67F`Q=% z?&CaY=L*905lV=i;O{inh=rB_Bi>LbVT$@)q~?eb>I8p=gJTCe5IG6UAX)FBEP8T7=p|j;n;n+6lT>Bc_{^$@AA7VsWQMKxnyy{V zk0qc=S12@5NZ9)*j3YW*@6>Q{?Fm~@Z5hpf_$hH3>l*m3nGENk#?Ham& zM(%^g8IiC(52V}AUkWypuqEO{6mczxz(%8Flah`R{IKyJ)qJm+O-dOI;eUkqoc4tb z)w4aM@9FQw>;ax7x8y!*L=od;F;8^?OpG5RV^s)=??XRsd=bM_a9yb?LxFxQH48@n ziSiSqq}3m`IqNV&Zn#7417!HsI>5;Nk5Naw<)wvyN=(szlJudYE=9({x$+eg z5%Fe+@8>Ey1rymf4wn3A@jBSno&kXMkQYtV}SBU9l(rD5XJlTT3-4p zXc>LuGUH7U#rsY2(pCwT6B^zKnGbq1?pww>VNMg)MFK{yg7bhP{WeKy+vk^J{PC*a zF~*2TT0f^1nApBcHV!{FWrKx}>Du>Vh?=Ks0u#~qNyG)0;@g8Z7B)>XJGc8^V~((Z zieQzmPJuyken2@Kq(X4yu8!M^$V#~Ea3rMrKNAr#NCgOyhe8fdls_b;-kxHbp1pn> zj{A|Z#=Jcq#lJl@h~oV*d2h=Gg+@D=?YP!d_^^s2duKbVqr<~Lxu2MXh_%M>tz^wJ zpw`1n&rhl6PS8X1)sHmgEWOF5so=EunaPUiPowkl=1C;(=T=#WEapMlt-bwGpNW7r zOz%1?`8ezMj=5i$s+h_OfO=vax*Fi6=a*|TtJ0ND$4hm`Oo=dYA= zFyPr+TY!0ds{5NM+TE-uAWmMqE3-(~->Iv(hN<(0{$UzUXx5Ny@lu5(pS5v} zNaFb?dBTjC2cPZj_x2$Ws56U&diO8mTI-Cs2qm(AlPqTqaEE8x{l{n~m>td5fWSm_ z0M^>|R3|n9Or7SIkS6SM_-ntZJ>uQYA~v_PghHY&Pf;A0%B$r*2`2+XXcBXC@HB!V zFtfXYNsDN3hAUlGvNicS7OA@;)vbfNqRrzM9q1t*4EJ?#;MJ8(Q$%$Y(Il;Su}C37 zSEe9#(5RC7DQGfQj@(sDRm2e9UE4G?kVw~6sY`a&#oX0#L~9D!lrezM^^j5=^YT@9 zph<~vZHD9?s+dqHLW=lmB<6G>cp>;q=z~by4_7zZ32#xDE|lC4c`y-PgM_lFtM|b3 zD_quf*EHsc7}(%u@hxF8vSeI~GC~&x2jV=uhNBtSnM#a&ka4fIiz-5i?Aj!wRZ`sl zm`xRS9pj96!1Ws-GmzrGF1a~1%PmMI`M0lUoDpNc0hE?EGl{sbPdYjrf}hu1yFLYr zjZE+x-@w=-mYf2fo^UACgX`XJNHO%Pa0O^lyYFvgoJX@xCa-RmmwyBz>>CrCFHUk+ zbnud*{i^xi#E2qhV-@F37x2V+Q*yHRD)x1m*&>e{*J)xScQcc4cJsX!AuIdTN*+nO zd8m%OQ)H>Lr$=w5yM<|qsE&FkOTZK7EdwXr=4Km=5T~Qy33n^wjOe>^uJw}+ys5{7v6jdEA-1y{bwsqM z;pzEW$%gYHy3`y@HQWd@%P3Qr5MeTTJEM-c>-^hPfD+H`$;0{$et#NYeVz>E?qE#E z;@=zI+oH$cV8n6=S?GSur9;BbB_vI}qtQg%P+c-4L5b>4q{egAvP-WTDkkIIK+`1$UGYMNHyq4r>#VxQ-?l=hR`aO_qL>5bx|jevA=E ztd0b@+(2H7Eh)!R3a#U?E}LxY(s7>{Z9DEbQxIW11r*-}CdsagkRm>w#B9yEj&f&j 
zgoO^KWYO}oJHg2RyBOyJt`wX|1^Ch8a(P&4b3S*H@khAy8^$>Tiump%rmc~?b$|H6 z>?|D@n=fAJV6w8xm_m0C(-5&>XOK065NftaAWiG3sRd1z;$IIhheTEZETTADmPN)?Lkz%jE)M%#&{+2YBS5On=*Cxs#h1LSb0pIyE1^hQ+CDdcMu3FT!0AfTb)HO6mE^ERwf{^5C|q-qE1SI@!cT_-I%;7@l@It`>@W4% zJ_f@jglED@>+WgP5pPzCs0WZ1>j@-j8zu2=x3{43%?c|3+ha5t)EZu>gg}~3qo!Lz zQ$=c;#Lca~|8x@*VN3)pRZl2na^7TsC1M9f6azz;p26o9Im5UkOo>TO3Xdnsoup)7 z$oBrAzhN_;90KMyY)&>BWBqDzrqkWyrj0sc&_!eFcY0_JfYPl`#FmU*l(7ynYTx4I zAKaOyBx2iRK%3k_B!49#XHm#qAfz}&g%5}`1eK%y?7 zD7xC{gwd`L`+&V{4hs-ieQcYOh;VZIzNA>mJ{YX{FC@Q)FrTI3EUtWsK+04lcelp)1DLuUNsr?qk~jF<(1XK;1bMD2G&Xb-kNZW%6e!&nYd)U%{s2kP*0 zmj?Wh>bGYCF0-y8ftHXt3ZcDT6^17F{0dUt&6|jb2QV?REkKHVf#iGxv-8d|4yMv! zT&{1d6Xx2XL#ml|8kz_%A|YLn))wspTfW<4j1i$3_SL#(4n(mol9e-Ezrf;uWAE*F z)h!ue!~_&gvTE<2kVG^f5g%q4HxXI8KbW^@Y3(^TTyjI>jChv=lJDP>Wr8YL30S58 zI*Ql}@9xin6G>ArLx#WaVxv7Cf6y?zxEPs#l7Eq`tWqwA{ah*-%e+7-Sr7D6XeaEZ8tA~>wrYib|b zJjofpynCC5h~1&~?V7he0TuUs$jt^2qF?Y>Gkc2&CB3a$5yHXmzIZ6 z3!7Iyn+je5PRO)N_o1dCq7rITs$>vF{IEb=?Zn_a{%|9XaE<3-R0H`-nejsc9zg-Q zy|?M%+UEG4b&oU=F>}o7y|-1s((ov1;LetCc8<>@3s;=dP3@jOd22HHLw&TdImtcR z*kdS+p7d;v5T6syhxR_!bVPV#vLMouy^ui__2Wog^1KH3?p?a_o@jc`!xtN@f;{fzwpyZ+#3xZA zmjkjE=M2(9aF?QcvWbaU{;V-L1DNQZLb|(xjxPLsSQ`^7`*ad$qMI?_9k0ThJzT_E zk9(@=xUjiW0`%ngwh44n`ZOw~WgHB??TM|oGGO=bo^FyNJmvvIPbd`ffFgYcN$EhO z@!cP{cF_jMei|0sGmSCgVI%+|tN;_+vjSTfQo|_PnHjlf8(YL&BLIjl6f$U{dk*QY ziJP+}!~qknKG%pQ493=v9tyLggWS-)ML#0vutf7b(r~46o`+A-|Ct;>^?W0mU}dC4 zz5UA!=z+zYB)2NyiR=X=Q}^bNuHT1UGsAsY-#s2>7 z7-kr(`BGEUYgSW)m#*T)Movk7870e)Q!o+{?~Np@aA#r?_Hq*!G5myW@eDf&(r zf`1L+i*xpiM}6cuc&$-f`Z>FhMD#imaUB6qdE(Sf2R69Z8~Ity_t#zX{7od%^#^Ey}|ER-=q?y#+MgUMSQ-OUT3 zFTB^i-I$MSzS*3wB!?rKcaVm&CQE7K&FWoDW#Vskp^t;Zd)Oauv=Or=gVP;bsSaFP z-bF23pkf!td;}qi0k3X0JmTFZBx3Rqu%sgts(~fqJrqH!qZkshjFV2@Ys|+t*L+<> z2!O=&J~A;TEgP*~+61BaQ}=!&j_~zo@pRkS3)KV?^Z^Pw5`t`{v2xLJgk$D#KvrR` z-hI#{cIxZKkk_z=S<8}986BjY*4PdIf zP^dwcjE_(Td+E@=ERGi6PTE?U;lX{>#6)=60%WfJPQ;g_k5Ljg{N)@LnwKTP5_Di- zP60oe`?v{;*u!RkQe>szU1^ozrRNjWQ$7F0+kNKp6wXe>=6n-!8O%RZTq*b@70@qb 
zJ{f-yH2w@LFdd|M_>{3nn5R`PnF63_KTTR%;Mo{VINsf7j4fiws*1n_h!{Rg1{%QT zgdAVOy!oxM&j%53B#89TQi@Iq{iYUr2 zlJZ)7f(Spyj09FTGUSfjmy9jK`hUo+PA(AO}Psh>Y3aAN#A85zN(u!!A5hjc?-4Rt@&aV-AS@fF#?*8ZkMl zO(5^VME8Bt(X{l-Rjutq92*M{6edXjYg{KbKUu%-(>fse&Fy%9TInZ7 zkt6oL?Rd1+{~2+FahYPMNu|Z~Qx;rOen=^~nC-rg+5V9+Pw?$D_f;NER6iyaw}ocA zd?b8YaX&GxW16eCs?R5iIDSeF8jrL2d{#Sn1hAi_^!9gnM%&2!%vdApHsJaGzY$zw zeoiqN#}9gDw5J{Vy)=Z+@)xEcVo^EixIBntmePMA815v^PQhmy(Rbbq!BjckF&+#1Y<<6h_o(ahFeB9#BR8TavTC zD90wj!}mMmozS2|V8rCmr14OEzb7AlD$)jXLfRj$jNBiLHKJ>rm^nB=iu#YF=E|j+ z8Ft38`;$>lu-BU3Tmuy0pGkNq2x)lJDPQB;LmXJ%v-cNM)5W(N2InHU9(wU&!LwIn zl-$2k?m7_GPOBFIzR}Pm%u3V3N__(fwv_yxN*Hvpuh-dwT5s*f|Ig|y zuJpKN8ndyETa^j5m)GzIBli!JbY^pv1z0Hxg+>lZ`zNK*$zkaMY3`~^n1DPg#p0Oqg%FW10Wf`SE}LJJG9QfQ)q1qk%8;wsIYq1f3oQjdDqaxx`T}W+-Cmn+Wh}HCMmo&6{2GbTedgg zOl>R@Zo`qkIIMMl!rDx~ ztXCi{rk{i=@*9)Yb~@?j|N9!j))%QemZfcs$*=+*Z>0Y=?$VJv)FegJ&BV2c1X09yBQa;w;e@8G+c)euvbBiQz1(5O81ce| z@9XA6Aq5lJ;Ur@}js2S&l`fz+n`{DQr{j(=!ier9vj?;SOMFL?kF&h;nv_qfwGGCJ zHd(u)j5@+2W^lS$E7^iq0bE*+rWQK#!+9B7beL}oLpQub?ik~bm@@Lin zR+QrP7JEF5LD+f@=|3e)_a!Px+d^sVZo*r!u`{!}7~ZyIwzSxp=`FgGO<9ENEx_g8 zxLRyUIVIFmUFWow#~ApoZ>wpEaBvx(#8|1&^`+=ib1Kzvk`(%?ZT<)%IOKh=ryJLVS!Xks z0g7-33AyfVKJf0+GRV#_vI!%0Gnv3dw39@f=~+_eLK^2W(qttWp53&uN6hrXx4Z#q zrFwYj*+o6Mn#Lf;)oa}RnMVI#)wBVM@GKJ2WXYysP5psxw~^ zS9a4ua`3i0+XyGL-7uG}*>jL!iR>JbvD2z_=_6_vlW=6rxC9~F=Nff{jbw1TTPsax z)(f)W(sCZPu(HET9Y&eNNIKsHOnAcK-UCLim&gN(^a6mc93ol_>^$do3o~dVdMw$9 z?b=2g@r)A;f7g|GV!e>J8hrkFr+R+t1(T*Fs$Iu~T!2cV-7{fu=b%MkTGqG~2@2Qn z|KxHlnN-eDC4FjngWeyk+37C2t|^L`lmrk-2?akzFhtSLlJ;oOT5w=>*dc5WSS*}H zx09#dh7me)JyX@KuP4J^OW(>u`T{gYYyW4Vgg7^^H2h*f2CbB`XyM_<$`X7f}g*YWDAX`+kp!h_Eu& zuz@!<^A{FC_vqKX!!9+FCH8j-{m2@ssCk-OSWpHK*>cq@FtS$WN@Z3s%-5Cjk2HK4g}zN0*$vlyfj2 z!u--AUinrgJCdJI`O^#93RK=_dLpc}WWl>ylX|FWh;W1jFewOy8f3|M7-jG^D{srjP~#qM;D`f%2`51C?@hWUh+?4EfFMTmbNdg%h`dCc4Lwj?KK?oPc^a$hIOs&Vh$96iRfu0qK_dv{T>h1 
zNj=@jPQ)KH423nbZx1D^XOM~y&gWliow;)xS649x@PF=^#pt5woYSl!fKHxsRuW0wbEuBbm@al$$!hl&m&9a>%#Mz-1Cj>zj|GWCAt@ojyIKgUAp^b-3yK9xaOOxc}svJ zju(+btMuVy4kRmYCAk=0Y(xmdx|EymynJF>UMPDyB)W|v@##z*G-m`Uux75 zqY6iHAFAWM6&b|LM+29Vmr=F_80C{H+$)VZqBT{7J7o+SalMLMSK|$1Eqk#A$LXSbwQ(?5uAx`Lm;8O%_l5f! z;&PGP{AOV>W?H9j2G1Goa(~6W)@UQ#Q6*T3C|#{1lg8IkBb%K!HkPn@i}a+eP(^Se z(T?jZx!0Sri2am#Fv-eaD&b4a8z_cThJ54~dQWIr-S6IL8X^Yz`na!vC(bvKlRZ}& zJh{u~@6!3QZ#K#ZZ(ItYTTY7^M3KIQq_+U+{#hJoH`6+Eu#D`uc;i>}A#Cn{tC2_8 z?+KKcC&Eg>+o*u+N4con*~|$YZ#VLY^`n3y_E1ouZw6At?;tTp%diLLbS3VdTIi_) zk~H3&ZD}81&%M)hM67`ZQ0WMT1_sG`7iF>OWk!^&{q=5B5z+K&MwSeoINw7~{A5m8 zl1CElbATxY!ilh>;Jrp1@ffO3SSmq^`+ekQB)JTOUUnO24;x5C=1Q&ec@s%IpC!*B0Gb$t4kHc8_52}GpELfm zoB7umf}Y`#y3bSHfuNyh!)MU@g3)tDsi933<_-Zw*k2?zA7*~Q*LKD)8OenAG)%GU z2YX_P>&xWgn9D87rwzxw8SH>)^#;tr(niZ*jO5MJJ_Sr&5}PA%)IXZIKSS>fsG$I$_wz z-!N?vuDt*zQK3+SEE(UV3}$Ng_G^+%k_!;YM~slOcyx|=!ZEr1mZ{m@TvH<%;8-ca zQ}?!e9E+5Fo60ydEvdM=Ne_$iD znZ>t_e{kP3&Img(An8GbLKUn8e4hgFQ#B9k>`SJk|7-jaNvI}I$dJU&gNpqJWY0_p zvU{G#@bCVg5l8IDNcxD$Tb_X`<{y%ot10E_)aq#4a6Zi4dN}#S{m5k8tNEFi0QDeB zSEoxf()DBNqTLq004+F90x_?HSp@oElfV0ksfp;p0!&Us2~*TRB{gSCsfpa)wQahE z6b#s^;eKYc>zgZ~+Ct92iSg%TWS70UGg|ctiAUUeu5(W};#@W#`S%NBj#!NE0cuvc zjgusc6R{=Zmz2Q{vC0rz8jiS8WoZffa(-n(B9>jVV5g<$4o&?V*9ltNoC>quov zdAU-5WBe2RwqeMuais<@(fyWmw1Co{ZV{s<(o6r&s3Ky0E4n!kBa+{fM2t5W zIh70kP1H8s818ST;)MFD3e7NlSsFqiK@!d1NyAP6n>LCEPqDGKeOAVQ7-@ttl0u0# zEp7=%TBXPm@lT4#3`AjnveNbp_qcx*|9SQuiLNmcXUq0N5TyM?2!d+~siH??^wc`4Lxl_ATIuZeUJ zPvRM;;Vh_)tNiV*U=l8DmJqDq$(Hd1Iw`#(m97I@^)5=zoEX}Pk>E-uE@DVBG(8Y2 zou1tQnqo`Im8pb2`hC5b88_Sq!xJ$MSm0w)DZkVOC;dKmm9^-|U`a`)T!AeeSEUZQ z&Uwf#1uZSyyIz>a8LWPySnkl z#Iz(QXzE5|;3eo96qJp+&%QM>1+Qr$A|8Br_xwx-87gYuNMcLDwW#1usFKomxA&HY zZZ-4|$kMa2IJ;$WWwhW9GEHrKv1aHB{c#C5r^%GovPtE&sSQzx@>?pWx9{4agI8fw4BYjMIij}?5F!qR9G)nz zPfETCTxVUxul};Tf$^;QO-NCMenX-&J|Nv`oNO&dpad)PMn)B3^rs-YskE5G6!DEo zd`%G32x=jV5B5aCv3V1tJ0Abm(4_M3cn(M$HzfxLS8h 
zj%dT+E+{K-=d%Hd^yVa`>nWYhum@GkyoC`)7@akvTY@F7Tat@UZ|0K2TN}?gdz6~|w=YS9bI)xPXs3SUhSGX&NhsV3ynzV>+BOv*gHw2Sbz)HZu6u|L@=0g2O zIAob;PMX`9kchR~6mgAP;XIk--JbF|`XtBoO!)zy&DQ6Uad$9D5v!FIhyIih#d`>O zX@Pg#yn^~#8{GT~_w()WiSXcb;4$<@9}ssfgVJ5d7-p5h`0#;ZP7?2%s| zuDr3k^IAj%ENKXZuzHdQmWaDh1Uo8=h-D1FKm7=Ak-MvrM^wLIh}}v#M6tHWTAcHb z;dW30_(P5J($Dz=nCR|Cx@=9eQDE3?C1Kr3J-Ne-IO1`K%FWeuHLye+P7&;#={n%n z(7im8a+SZ`5k?#__T*d-c_0xTNg__NybYKM7vu6wA3Ge}QN|as0-M12XDS=B0#eLJ zlbLT-AwPk_buc5vN*bJ@?igc_aB)^`auHrDk17GjQUD)xVJDdq((4Rz%;q8DI8zWY zvoPo8VkU8Rox@PY-6R`~H^#O!5b>rlFgZ*SZzM6-J?pxE4L0pP13Z8>{$}K+ zOv#z}T8&u}>?A{#5G!rSk6lF$DccmPB+Q1ww5rp+c%=_V+h!9I(bESI=?H}aqG-2} zb{*~vr==kh8Ln;w*RSs2XUv=W*o8;eQ6GKM$;Kb?P9|ugZ$g%iQ>cTTV(7L*PmS%R za91)majOZ4XcGZO3PPcTDe6;6&5qyVLohNj8#p32?rEG8<^j!-8wM!CZ6u^~zs?Dm z=yltTJz{2`g6W>q;+C9)F~OFQ(

)@cgC^@8~<-$Rj2Q0YKED;J1*ZTl;p9jv=&` zgvwiSXBbn2W53GbE*Ra>PEyctNgSSu(-e6mmXVoe+K3` zf(3=SIjk$VGmSQ4`e5JpEw_{x%3#HR7Wtc}4?d{H?KUA7KypK0cB!Wio=oyMwUWU> zFeX=8!mG^xGd+_x5b!VVY||An(iNdpPsNKld`UTnQrPn+=F*nVpu3tz>SIL_XI5!B z-Mja&QQlNVL;+S9VT}yZbslxGFJV9|oXeFyQhtMTjwQk9joZo!wMOf)6UaaL(%?v1!j~-s|oxS+kn5U$kcxRtAF-D^?@Z6Jgi~Sjh>6Mh;0EQ<^-nvoo!Y_z4RL2qVJ^`oOK2 zkO-@!eq*0U6Xjk~-UhV+gSS4#uwOdcXY3K9Tz74$h0|rV{Zzzvwa_9NiNLtNRa0@& zrEZc5mTKS<^07_wfziKz%C7C(64xOHejZOkf9k!Bc55?jfKnR>g-TpWxDO>1y#0HJ z{aJ3Uysz;_+-}V%n_!9Se&k~H=1uP~)pz$dz6sNqFo_Kqy3bJXwtN5-=>r03J~L}L z=D|JCNFzp=`b@6@Pn-`TC%1AGkG@;#@5d&wSvc*w{&+)7Tt}|!9&93ZH$UMlsO~;{ zfd}l>ut?fNC{3>NuzBwkxHA|F+)ncBKGd{C3`ij^MLI$OK`J>&5kHK?>|pYvE@mEB zzCbz*_i*Elc>47tuK|kn5oE;=d)yZH%Wn-QY4SY`$1Xh5s3UHfNJ*oshZOmvNPZ;9 zY4^*WQgPP8=KXLQ3_6P;ym>D1f4E1Rs0#tnV2MPhDygHB;Kxw#UBOm6kTc3S7!7!6 z$fbZ;?PE=8r&(zc+P55>1BhkeA-t+1y4lQdsN$2<$5AS$&^bT&xHI3z(x!X7DLS!P zQMD&LffCshNXCFJg<;Uwu2V!t?ul#R$*=Ynpv3be^5lFx4qL_jHlQaP)r8lh*~epm zB7909%qBC(xP$J=J=F+f=2*!_&oYj9o<<%%E$;wsCI9PmyFDyiKHZoiOpD~JYBp79 z;(G@9=sWil-os&EDi&Um4oPhGGmSOEcOGy=8wzDmkw1&%?CHvx8N7@6*~T0($EZ)s zATTjLhm5xaV|tpzluofY6`{afSoYW3$(=vf_@j)e;U3KQ8GOLNfhY!vdLBisgDA6$ zw;)D>L10?#K!1pp9CRNe_k7b6;XpJr-vWX$UxO_rFQ5{>R{8caqHXm;f0W% z#PuR_adQUy{Mwar<#eUD*uB{JFT|G`difM{ZENL5Cy6hiM0K4Vrehou!-m24N2!frTz`jw`u+pH^vUE6cwixC(l^i>pkD-c%2b$J&_8*xr!_iB?D zkxt*hRA*KSGd|2fGtkoU8tPaF9Z6`!CM@-I;Q&OuHJ-Y{{E^WKaS!gbCM3d3RuO1X zi!2?lqYnD&Si>bxExaE{CBl^YuQwGDt*D5lS}9(XtJVl4<_#3XZUtR*CbPWqdc4sD zL=2vpF1~;xo;Q()*2glYz1{dD2COV#bEn`LB77?mlWF&cgQ^Iv0c0eOrlOlSriPW7Nhm z&L={dXG$vNhC10kso!-UH$@S>KoLyBiWdtElJg15VQaT_cE;Q6$KuQX82|eE8fl(^ zC2%79B+1zHI0wycb5)d)`;;+87^k^uYza$rpC%psjb=m_UDe~&<(~VD(Oz~wZ~{xi zXQ_d66}}z$WYn2lUD)J_cW>*g`~SzZV5uDfl84sB1TiLX$k26gX(!m``Y~in2|SB8ltE}NN#$32KD9%=h3wH*N(x_`K}#qaQIg<>-CPR3s>7{Qena)7$8nb5gr zp)(+0--Zs)HoOOxHIPd6*QuJWRaw*vI8jv}+Q?Wl#xYRXZ}JV37BPYuirS8q3YcR4 zCYkwOGajgmrM)rUfTf*289QGF-+;bl^brvr36#hu!b-unsQ^DU*|HLM*>_Ap#IC%2 zUaU<2on%t6R9qfhGQLY0oF9~;`qW13y!Y>!kcjD0iX{5B}r)yWpn-T 
z2nF{mql*}i>T>-MNj$$M4}Q{3)5rrEvf*Kt*5<}z_Z#Dk=)%H!lx{H;m|n67D*?Zy z0B$U|XH>Z{Xaz42){c5e8jd~A?sq0=!mwzFHBY_EmxinTttsw+(x*I+6#kwHIb_G2 zVTIFR|6tl8ydZ`ddq`G)AW?s$DB3KY{#>`EUwB&KAi+PGga`{(U)IB_!V=q`$;MY8 zIHEe^4#)5o>Wv9U{$hL)leH@MQyx#8e+_pCn`&ogU&ckln1H_-W5ipKvC2wF;`%$e z?u7db;Y;2Vr*NHYc9o^hGNPY{$cDP={$UE*_+rBlQo>bzm#*fMtYp}v@}E@6X-gqT zUUK}%zf4fX`bKSjJP#(mf0K`GtQaD_jBUK`KSsB{`ThffZaWl0#5LhWcmRwkA0p-r zsww`jg&+LiT@L@~xA;fgUWFNz1ryul$rh$Wv3fm>FQ(jJD*ttC+3}n%hYcSMrJ%>&YbYsuaoS)dh?dE&b{amsj0^ zCM3f8^54~--5iP-u0{q;81ag7jm{6?-rMD_ZanLoZ#b9$y3tU`!in%2B;>+s*graz z+cJuE`&D;MqutU>8*oG$3T04{UyJ0|17>({HzkP}5wGnIGS&z?GXRJ+6f$U{yEf@) zXXYb^5RQv2`o?xr^F7u_4G~LR*Cki^L1LAKiz?SMw)M?y;n`I@NDn8%>ywamNw<+D z=)>FwE?IX2htL{cd715a{0KTo1!NhiB zvaQ37rL&{mnDh+yVZBJ_rlUfUH3;#pH!L>%@Z@&sstT!hsO{7cMA;2y|$2=NoUd9jZ7DgNKq6kBT)o5b8B^f#0 zO{d1`C;Dc%)<5q$%M0#SMm}MZ6s9OCh!mv7^ivL9V%AX%E4}zyyUuKXzq_>wh%gWX zgy=&dbQ8i8-s0X~Ya0KXLzdPdJbiaGrjt-J4F>bbe_ywme9mBr>rUjN55#swjW6jB=7ud! zS+Nt!-Ps5u-gE=;tuAb>%0r6xF61o_P|Pgj?rM}LHQ!_X07WcuwaCR@CBH&E90`YC=7VTB?gUd16DET;Hy#cblQdX7y2KZQ9}CSlAWOuF z6mcDh$olM<4y^4?GU5~KA43>?>YR3h64Twu6r9VnRWbj&hfzdKZ_3VPk0SK-M86qy z7BWF#Xc={umiP5%G1J*#bPor5uP$ z_9zFXp4)DcA{GGxhO~r&8Mp!z>1iZow3f}#Q_D8Ve#2f-%UkYrQxIWC20$qYg^WF0 zM34>Ng18j7|dvwX!73BCeQ!F^^w8g39|KnkM1nar#bPbxH3G(=`yIAge z1mAaFdUD;wdAy06fVTOi)@71ZrWYG2BikBYT76G#I7JY$BeMKjI;bV= zq=RRFuNxV|NzDxPdzpwOt}(g7dr@AO+`NdRT?S+JugXTp)$kRQ5a9q}V_wtRWCf#y z?xj#VitudJ)9gxiG;u0|-1tX)B=v4Kq!eD0Nd&rA zoKJw*TCCTvnv!#xA5#FG+;>zGN!=w>cL;dQwbD|g7Q!W9rM8%}3}=4yFX<%-+DGo* zCU1g$XlQkg8pdzca!A>IsEm_GxJY~ZTmCov<70PUQ$Vw-=}pnM{i{*{63zWcL(3-F zy?`YUrY+(!OcF@w?r(fk5qv%oGtKi_L=iuL#B@UyO-H4$<5>X@G~NiCCZOm}Lm^nB z0aC;dA~Cy&oI$l;DMBA?lo3^1ZAJ-5L=Pd+K_Ci8frpJr4>gtuL!p!_Bta3y!$`rW zSKM>UK^$vp`&Zq=jV!`04KS+3Pzd{Y4O7&QAhn!Nm3vI*(Cv-fBaJ*_j%iW%Vz*Lm z>#AVI|0wd)MuNT1*P;azpBpN6u3!K~Bqx?pvGY#*@7$wJM#O*?@T4LX>cJ)EF%)xa zi1D+(s!bYmFqz?obdNPH6Z((fVJyN)Q}JTDO(Wruq{mUx4In9572bm#-YY%keCA;f z?(rsLJ^rSl6RB8O&cliE31qx8(0B+d?f?hRfOF7AMg(lz#5180J@1Aq%kanzml^tl 
z6al7Vce9QF>>o`$i{w3t^7xE``vRsG&Vgg>7?S)4Uxp52c(RE)t65YDPU=cmizL$Y z6l&tMCLQaugLv{eIUnPxrt17=RViT4+aY7xA(EECm0te)y% zCE!^U5Y}sV_h-?*TVZ1Eo^8BqU9T-5iRw9|;%J%dVWS@{`IzjAd#>?CjMCnm9bCCiilV$*RUzSPi zoK9zRFEv#Wrc=cM;AKcLzl_Z5z-(^V(1m9Gx-d}O%S}LpJIK%)ow|UQkXHmD3aBWY z6XW~!#Jl`T6B3a;!_bn(Qa*zwi!CLuq7wE2>3&EYc7nyPz1R}@Y9pWUPDX5rG(=Ip zhLp6tV4%sR(b_@B#?Iad&MywKxb9wS)DhjD0qMT2RE8D*>&Sl?_=_Bdj1_XsBJ+u)cJQK) z`gyAf={5@~;z&^OVgsY(zKwF}9l)AaeUd}utO^IDz1?I^urGr73}Y9ddPfrXR3weG zy@T4aldEhe=PT`>;Rzxq!vD8`A`^l*eUN!e_ zlX984Tx+PL@I6#WUG=3vur^SrIPuB2cBQ47y}|fHLUJ$@v`k*gk07$KszG-v4Tk17Jk*A(Gqm!tPFl412_>^Ve;vnI#xBGK#9EkZ*Q?;|X zK2k(!N{jUblJqf3qBXrd9QF6hC7OO(WU}9nn~Vr|X!>2rNQ>zwf{OYRq^7-KF*|f! zCVlY#80$&RH(wR2lVXYNlO($q$*{Mz+ga|z)eB#y`;_rS%uVtuRe2<_e3~ruG=zXS z?jh7D|*OZ!Ii&C(ej3tVL!3S7n7lB$H%*kuupkhV^Ky zieZ1mB908fxmcKVa8~vulN51~PymvoP^dr^{g+A4z|17_D>>(ZvHUpa;VZ@)u>w{R zJC;Ke-&e_ZEAW|bv6$b3i2w#e_cf!AFb54x_iUwVXzBPmb#PK`vCQ@XRwRz7{f6;J zOyBIgzRp8Hv|SdYc)v+rzNncco2?AQ?vG{`_qcBvYs8BhAVeApSOQA%MEPw}vdS&h z2Ys|k?lDFl^G!IRXNn2@-FJ*UqRI_S>{hCVmX7aI2YY<07W=>&A7A3WX96NjIs0-= zoqJ3%f1k`8{%TuB;xg(p*W>@1iZk$)26Mm=rNTVzskJr7ul+;(Qp|lyV3D$Ot2vdO*?rhC&fi#6Ko6 zM?S3&vmbABcZvIn(M2r16jqrHMGQX;44Ks(*5J>K;ppZXtXkXwhzNd80!ESZM*SRS zgd;7HS*(RC$)tvhh*)G!yw~m*Mjqj~j*eu=lStezDK0p!vrba~Y;9=d!*RbdRT1`e zc&xc+96Y-f=#ukm$_b0i<)wJ8!EapYFEU=$PuzZEk~+;#iIUc|5MSbx$lp>VM_a#u zPm;rBs<*=mW6)kZ`SVjz1<(C`P-5HaEh zAkl}y_A;vI|44d9y=4CoYrSDJ#-EHiB04uiHiD{sfie*t0qWg zOp*Va+b~IUuM8LLmeq*n>FWmLQ~6P&QtCGPRwZ{-`(7VJ*w< za`<0=dr^Bh4 zS2DT?Q?x!*D|gbxmC4CH3`Gx4y`K`%Y3?c}C1Ru~Li?_%;B=kHr|P*XrE)m)-8q_n zE?^z~OM7NGyNc6KlKoN#n#i;9t%eqw2bIM9rKyCk#3ON6qd2?~wM`eB+_w(g)lJf= z&62E@PHeLRmWFFkLtGYM}hu*7$5^3fGswTU=cioj$YE9TfZ=i@i8V-h0zY=c$R zTS*`MPUT7xX}d19u_;uyAGjJ3!jJq6rYFMw%R^0Qoe8>xT%SS|_KBr3{pR-zw2p|jD;RV1P7CVtD?b>m)}_K_uCmq#5)_lE}l>@EK%K_RPub;Dbt&oU=p6M zX*|_C7+pl$O>fMS2KHIa1i@W;WOy=11<1Yhg7EkyB$lY|Mk>t&I5~pnI!zFR)KrJ9g{_brrhp~3!^y^JK;6D0zgGh%+VW7! 
zBaA&_8eoXJSt}LxHD&Ro=twGJqs{x|(J##O=5YY%QKlinGOzc_2Rv~eO-`CPVGSc$ z3>o>1#>sY34My!G+U*#VaVEak(El53V|-DTL(-0=v_nBris!6KaxpvHiEX|(EY7*( zOkl(yQ3BU9D_xyTDZR&2FP#nEj7mcIEf=tsTfKGfPB4Yj%~hC(lf3+;kZU2sAVDWm z5W}>q{~YZ=z6u>KdXg#Hbr~wkQb^L>DM@Pq$!b?vpSpOZgS}x`AH9bO+0-m#HbGIP zghCNg#Op~+7e@&G*pkK_<9tYjxWEUr!MGzFQ2|KYp-_P;`i-Pt2m0bHHAE|5)eq~# zXuVUWA>u(4(WIq#u}C37o9YB1N+%?S^6uKrm${%Kg#>M(AkJrCV-9*_jsUy#2{ATU zK^V>W`PIp$CL%jY3Z;rkixtQcaSBB+?1<4G{-A;+TGaL+Z8ZTAop3H_q!LfGr;?Ty zU1&xdW-z>BcNij+`rQpHh%7R&Pk2X$*I-zl~-?QeYxFi$ru2xZZvwtlhbpzn$-QcN?^4e- z1?SgSZkRBZP<892t2JCwcpeqXzaRFC`E_c=n)p*^kwn2nMMN#zg15w2sSGRr3&_t+ zux_qU(<3(3tWi12oAKP7Zhw3kN7 zKBJE#|dyMdO{B6USorltG z<}ZaNouEs|B8Ak{F$`@a3or?nK^+rOv;)#I!2s(dS@ItmQ$+3NgC0{9p)V7i?-6T^ zMk3GP!S)NB@rYFW$-}(Z=px>u0Hj(Cg$h*BJJNGR4vx8y_he+05qDVf%q1wI7?VQ& zvZ5iZIm(I=MLe|B;?>=y#l!%%HPO8!We;6m0j3v5`7HMtbA&B5aRE3$>VEf=nqHrL zALg@bxm6=wk3VU6O#*^$HWYl9=;1_oNg&Ld4=V$nHs|hbgm-TythGfvnCR|9y88Zr z`zz=~zOV6Kmit13<7(vS_OK z?1d_LiFp9U@IBs))vis*^DvwrwJ=kir(v)WXG3FTbPqHg5kqnhK(h&ubWA{&ga=VV z*h0#%A*La~NeVn7$UWGoPiSrixoKVjNmLIZ6<@Ei^Vo+3Y#q6W8e>GKWjMN1E163= zpo;xrWWO8O>pYRka$Xz%j_t%aKn7{q@Ok7OZki$tRYR91D{XM|I^-Z}V0Bzl`iP(u zd;W_F%W`FJx+zN;qKFXb{zy|A;qyag0l)0rkfv-Rr362Ug1Kci_Xw&EmSk?mk$bes zjBxxJX89r4GDy{9sES=`-jO`x55ah>X_&A^5Z>;3H?l_)-{Z(f&u7^Z_ECdoX1m85 zdBkFAfYbelLTY6fuqEXQltLeRIWBsx50Cp3jXxsFEntX06w*Lz0*drWBs~J8W)!#P zI)mg&mJ^N9VGy<7VB{jW43V3qX1jp-_P;`e%@y zZD0-~I*&R;jK;W!MVvNHZ^g+(&ot%;6RL2hm55?}7Fj3u@dNj4V~)7z39op#~7%{M>v6gqK=j^pxLa75&uq`<1`dL9L@18I1# zmC8HJWB8WP!@{hf&o_+`VYvY<*Jac}OUMf-BINpuFK>^M2gsptHDHSscARH-*(1Tz$PMiCBS%-uzHcDA8}AXqp{q$CT~o zlF#gU+#5~bx%m5rCYQpkt)C;4guRKv=q0c?5QjxMI62>JGA5W_&0cknCaSlPitf#7 ze-Jw|JkHLNd#h1L1Tf^m)JXG}LO*Tz67x2SVOJW~m_qup&g}owcGiJyUFZMqwr=fs z>&mco9h1@|rFB~yS(a@zvgAr~paadZl|+qgxkq-Ibj-}m%*@Qp%*@QpzxVSw=e#=i zUcs^Z{r%B2*YSOxbMHNjvJX7Aw|tw1A|fmG6#D%|8cFfnAwEjP5}usM%sbR`mi%#TqmOdBL`}r(-r8L^!4cqBr8?Z|#T4sABy&W*` zQFmqZ+L?g=KjUJPs0*XqxU$5#9s_# zGbryzQ1A^>jhO@c7R(3KeV5ViVMtR(Saf|5F68sR6rXkjim1A{lcGPQ&WL>8bNIae 
zA`7Mb!;quZmag9xr4eC1qS|evAJr46^%uUn3><|Yh46Ml!*+1F5>CjFF=v^Cz`x3D zLHR)rhY`q8JF9btIP7dbrr~Ie4hO*u#5xoV+s6?$6z!UYE_U~~_pVAN`UwrekP-ON znW*Pc_etn53@UR$B+cez9!GBJz=HXdx*|q%-MxI?B!`Yq`$y7vFeMePB}`fRjA|m{ zvA;bTr#y?Q&q8&QJWlF(B21;=adEOJUzHIJ;en?U^?>w#9^NzX)nqHvwS~6CtAjag zQNasY=60|xt5$6d?F$-!n%poViw=2w?(hfw1q{#^5g@cg`;I)e?exqqX-Fbc^vIz{ z3#I+bu%9n>?ldC<6?Vawom$6n3)!Tx>sOHGqz@FOq;$W@vS_SH#Bn@d(ZDT@eocF1 zYdXBiuwQHAZa%|T@eHVaqFh8fTDpz7-;(*7hAbk|X3N-+Mbp<|LhYfqbw}@S3u`M| z%s12+@u7P6GQ#vOdw&Cz>Tg1gp|U0p3~TMn%^aTEUfeCa=d=#Bk>`9%%@JJ&9zw6b zND-<1Hnf;IAX)R^9hhuy(IJ%E<~tgW-E!NAL^Hp>EtmoiL-$>T4t0Q`X-(eds`;J< zE2454;zp-s@EM%%BRI&@`fR=D(#j7s7!kG05nJzBB+WmB8FOUOFu;x{ZjNl96MbEq z!~94?647y<#`2}4f5_5XSDVXHs|Dv`5PysyBJHFLd&%KAlg#`?Llp6DJcS|g7ry@B zkre+F;#-M0Ga)71bX974^|d{hO2%P+rs0U#D=PzKfHFVruVVOqj_{#I*C*EObr@w{t7PrzlNU!T&cb1 z(L(x-iX)mYzVDl_-(N^aI0g>U-lFce(4nH9nB9*}6ZV_msqF&!g%SCdzn?aL!KDI( zQTBVtP(So%Z=s3F4P4A0)D_Wrl|Fk*F|_;<7G!~$D2E9Geo`0rE&h{QcZ`0*46c8? zPD>c3KO;=2V+}r%YA-ev!O=8-(LhDC6FrY%@fVWlbviJ+dYT^2!2A_~NlYJ<9l@o% z{F{m|82mOy*U<^1=LQLqi*>z&XdO+0>mt!A3*YjEx<;T}!+D?b);Q)CYTh>bg?j?c{=&`a=b>s8-V#Fggh+n`clMLHWeeEuZCZ|W!=_s1R%(rS z-N>VXq19htzFLAw^{t`4y*!8?jzfKAyuD9)F$XMDJL}BN<~ACdU8CQDCo?quB1Xdi z-WCD$n@ZTSBRi12YiUukUOj{Gb{f2hrd#yjbyC8>-5!BM1=|)s>Ai)pC4P7u?j1B- zJ4e5^B$6*L`6IuxP|Cu9<$S@FoFXK>(RSZFvq=LsGdf^>I?RXT z8KN^0BIM7>Q9l`pnqQEOOU-5tLPSY4c;wIX=-dJ)i`;!n*e*U-pVWREsp3mLf2&#} zN`Nv9C9w}CDH^{GVKiRqK< zN-`#|#)zi7)*$&(^-AlL2{gkohH!+dbugHKeRK-yji_v8R_izl72{B#`N+e6a`sP| zpVm5LiYmF2{QHQimc5ZrmLX9z0TJ?uOzjfws^*%^$yzj%s*I@S^gz`1E?up)$7KMf z5C9ZjIu|+DXS++>ge3%~q(K>yY!x+NTJsPA!*mhCv`NB*TFao)Ube2owBO~h$ltD^ zyX)w06J6x@F)^5B1QUgsG#sT?w`d2Bj~ zn)P-`lHZcUzT{QpE}v_!nyP9e+O9o{jV-#FQxZz`4AeIlbt3b{K(x!~?EsqH$iQ5z z!HC#TW<&-*z`;<}5Gr&m^jWTqJy5!`WnIhEH8c@PNJ)GrdevJ7tz3qnfe<9}Bhy;! 
z$mE_`m2N0MvJ@(I_=oJx{9x-7Q<^#RL*{+A%ue-16lMJ;e`J}oBhzQENuTPD`{oj3 z7tL<7WhLSyypz~ournt6tcFciXenmRz zWkNP`xrGjP(KKdWjS(~GnMy7Pil2EXnSoe9AmqOzWK#{PsLR^Lg8vJq4KB&rm;DQK za?)8#gAq~NQ5Zv^SN+ip$6ka3vq}25`=l-BDwKRFXwK}@09`WrOC+fbRq{u^fteLC zc>57N6oPxY3k!R4{L@YD1ZZq}V`=l2NC7%3R|G8A$G`?!v*b zV)Q_=rBSsC6}szG)s}T)vKEiWMy{!ENDF_oFF;ay5K^@9WEFh7 zyF|UNY?3Y$z9pXT;ojxuVH$v#`T$=X(pm(XVR<;ha=L`YSD=HwM+wAoXGwOoUd0}5 zk62GYJk2J9Y;`<=W~=9_EdxnDac1FA6?~T=`0fSG!0P14sM>5h^_sw48YS7 z092Zi!!eSZVb9iQsQALsZ({IZjHS`{O!!cABeR?D%3hrE;+AUm<>4KgXQ?xy9_lE3 z)+yjjERlagB>!R}P<#-#0IlpWMg1942;(_<3;S)k>q8FBFP1jpkT6 zU)f$RRf^^n_`Uu2*x%e0QQmqKqmIAm%VpSJDQsrjvYe*dSzBJn)hpx8e0_JNU|yx( z3kSamU+vQSDb_2c(e`TCt|c(j%IzrD#;WzAd5u~&kG9Btua^NC{`?)}tlv#CMNQ5!S(6;ik}ZMM*Mo|F5Z|7d$=B+|W}#ZHSnV8WE$8Zue9gQ? z@n;QwXZ~y3`zYukh<+>3M+u?R&FyMdx0|;qH#eFqHxcS?Ch_ecvVW3?s@OBv8uEAZ z4n?0o8jYHYquamsK-9bwn(Nqr7k9~5u1u6B&AU`FBqsT{5`6wXqschb(^s`4lK$7km=0hY{lzk8~#7|cR7^_a352+*KUAg-hmwF7cKMZy+I@xX+KE+4Wu#xE0 z15xu)XwbftMOZ7QGcQ&1T%lIXH;ThT@ocfuDCNuM zQ>uyR&&>bBj$(7NU_PUgJIgN&&G``zrMR{kP8LPg zXQ8^TkYpbHawk_Tm&T-cHlI^V#3-P>mG3J-q2Tin;G@`7!DF5o8NZ;4h><6IGZiTg zC13Os=Wmpzi{sVVhWXNZB-!Wi97?_n$J#{6nql` zj0M;nwqDNHr_8q$ACaA9vSu;}|2E+ApY8`cUaB@HO66j+SSeJ;OO?syM7f%8nD3}C zq88a_Q`iAq2I9L21YV)MXDQ#t^NoD5Rx{sIWki+2f1(6}^zVayOQHJ}+O^_jvsSEE zXKRI`{N>`=Qmr^GxlFxTF4Y_62da(8O+Ay5&tD7|Fi=1AkBkbIoQ*yweYVnd{3CTo zd{*2_Z-PPmkAY{uJEz^{@)OP3N@NzMX>M6bTL;KZOG&p?_>yNH4E5KW=`e zmLa*QPr3f?#2fS|ihd3e5&$dLm1@2=SvS8>$B??okUF}>kpD~YgA^>Wp)fURex-&D zrC>{;;MWlJ>I~DJ1zF(1Z2C9q*hrnB2cqV;(6BIIwOrbQ)rpCEv0;9vmWZ#O$t;vZ z$?ucTva2??yw5(%AJnpuXX$~c`6D!#S>(Iu?Bov_`^9LZ+6X} zRJ}`X8BlEd2$v=@;-!DsPs4!y838*v73*EitUf!|l$dM&qQQw6=olFPlTgb43OO1X zsDtI|&5~5WCe7be7n5n=PMd244ATD&`WZr(JQTT64&%-pv)YwOhiz1Y^%SYD`G-0y zqu;paQR^=T!x@-=A~49o(3>OUDN+~}5tsj^;)uBH?xi=$q2u4);VL$jVq?6zvts_E zjv;xAk51Wojl)p!Unnp+$UQbXzu36yn*XiGBDaTcuZN-Kh||S#b+Js$Ez0=cRpiq4 z_(Zfi1tqa*dDRI>AoQw$BC@%+Q_EK-i{@&|j;Mxb-wh$e9|=6$6TJq=OlhWQuC9iN z?*1XI8Ng_|hBw))tz4XF)Tc@l4Re&5BH~geUsMhyM?-SWBlw}E=X;@T%rzB%ru=+F 
zt`vM=dpoJu0ySAQRU1WfZ6$BCXc7tq*MR`-@@{*dY|Q{{%ap&B+6Z3!X%7~oME zkq;(y)lwBT)f*{&i~LLcm-YaXkM(4i-o|F@yUlS*j>y}y$qfv`j|UuY9(6?=9wI{> zI7jpZeEU5y`>d&-CCgdN9gv44I2d^dODH5G;p}HYGi33Mp5-e(N361NO;MR8LR%+NJ z_5ly%>jpuu1n(yE)?fyCdZpSJD{nWqQSgTHbW5S&wh$m%EiC40jq-T2BFXS}ijRm^ zawFqy03iAHAfF^;AEB{%TC*TM67^!eS*unX<__u^^1=Q4@t8y+$uOzTL4BmCxmETi zwM0Bks)hpqk!J#l4`FN5jb=s0%eI@%%8tnrQ`Kn)A%6?_Xe!8zrTy~Am=4AN&n@ng zgQ!;bFLljUl|?ia{QZ1gdXa%reir12xwf2=k(9Gl6%liT>WCCl)$L@S118$B?WL7m zy_l~RrkYaxRcq#4Wk=+|xSNlV#!zw|B*%(G#?(4%Mz)cxl}pp52D^pK9aR)DmrDt> z=~cj^@q8HFm=ni6?xd2OgkwY+58f1RCh^W7qSo&ULunDrR-|jBFlFwd?1}HMa5yLxCa!--4JVJ7cfro zYbrk?cS}S!M6%QHD!VmWs)+bjeOg-8-k09k%k?dW{6+9l1LNrJYNxZ<&5g}Y)JvC`CADmme;@I(g4^i{ zt}TwXWkK4+%E*E9jv2=~s)~6a-W$X*kD+44E0E^UJKx;j=^W^q%Ty8ZMH1n&q4f2phV4OJSzAbb^Ye2yRqOD=#(1Z%3;CjUO-a|E~ZMFrOuN83R`a$=;9 z-E30MNQYFlHl43X&jf~;&E;w?jW&A}wf>?nmq9p$AfTz_H_`RO>E^InBMLr$w~ZBU zcVCa9=n9BV5s}WBoE~eIWcpcgm$b{L^NmuqV(z8RDf!J2?^r>U>Qxp?|GnWy)eUh= zx)o(B6LTNcMbw58anb?^zAwP24Y~LzIefWj?x*O8=$_4h2qFIdz#~T5g-*Gq{2!;} z$~_NIN5mI$xAR?PP&7RdrfZ5xs=gJOQc*9M2dUvq`T2y;(>ks!O5fra#HACv$zONBC@oqkmUO5&` zPlYM*SxOUC^E9<=?6U}i^rwT4&(hsLCmX_GE4`*&RilD);UZFaTSzG3L>WPU3{%#u!{FP(T^n933mLP}CJj@R*Rc6ZhLa{niH!o1(e_H%5Er`L#awkp8sPPEQ${xHQmblKW~axBHS;QUMs%HkW1njvkm6TEjNEnK zd~UorHY>du6{);P-L-CBqr!-mtK3gtmP5~L;X&m@mc)!#rHWNHuTy+P4sfDB-L+)8ed&@o@*asY2}A?&D&JDO@3iOmgAMZ z`}y&EmmWvm+XcxIX{mc~-mK=z+hvw0n)&ZgS43{(Zl)*&qUD{igt?8p>8g2`DmMDH zY^nNg&~JH!q|mkHxwTw=HapQcKR#2KZcd)NWoz?n^B(oi$W0?&W7It65YGpG@4AAt z)x7WjXOOlsNbg6GTpMI2FI{ls<^yVtNajHs#NJNo2SH`wk1>q7dDH;6EF5mhPD~$C zdPEFLjbwx%dOi#fYfe&Kwo6YiMZIo5qLzp%1~LoWY_s1aiJFf>lZvRb=40yEXhfYQ z{o`r+L}99CK5^yfV37Vv(D8w7m#^f-=2MD}=*i9W6bpgiPXm0sfYVjVN)_M0d`2w= z`K1x*Ng4bGdgTTVERojF!it!M#(^{~nmF64wP#LNM1M|&5v5eWrlyFtM5c0|;Zgc^NN*rg-@S%T>5BAZnQy2qqDRKx z%{b;SJc^=kLWIO3{RuP8o%vG3d`k@xxna-^VQ(k(+n@%YWc=b;Nrm50@P z_Q~eBENLq=sxoQyFRF+rE)z4e5*$9mU!gfoH0hp^$zr20m9NQ&*n~8Q|E9`G`P~6& zFrsrLL1h5`jsP4bP)S-f><;h`Rpkb&e4{8xkGPq{e}afNhlY8fSd%qVGZ-?F1;_tV 
zcEp=YwCfWbYW@w)^+aQ1B9^Q(7XL>b=gB_~cvU|9nON*GRQwkT}(oC~1$w!(=S6ABwqiz0vW|scq5`|HA4ajaU ztn?f=H-YZ3oSrSynPvNCY~A6Oy_usl8220JqVbEXqyrf*rxNl%=J}rm(gEsK>O6OsJa1EH^RHh_8yuWsw|@N zkjSr69h+1gn{#{i7;_^PMI;Hmou5aqJdCbm;X+L7%I=*x>7A%G>m^y{ zbexJJz7g-WaUemV;dmI1hCwnFj+LIE?1&0~A{W8!gq{ef))0%k%2nw)J4w0gtsz4NNsr215-!{jJ42qZb4rnU_wM?%QIG5FffXw+t9 zr>L7Ke6u|Bh#WmApsG#YPwwg9qD|M7{BzG5w&J>}Vk4qYDx*>g;b#DjB<||mG zD$8yx=4L91$PN2c*?TCOZVnTQB3DhEl2)FdI)01wsIs-i07ca;p$h8lvSE?*=iEvq z5g$4$A%yr_1Am-6NxHJgKRgsM^q9?qwuy6y5u<1952ee)+uv4)vPZ$dH~6{ z_mARx6R_$$vWsrih%`-<>dd6h#+6gzQoZ zpq;aoMop#yjGGHp64A*X6kSRndmGrkUMG8dHL6m)G-a&$;Fhf>uZs27>ry1D#-L(d zOKPjR*@vm*?eoNDvj!LNe1anaXpUUWhE$dOu%uf ze2nA-(56f%otbUOc3zlWZYI^YU4C_BmZ2N1*{rg+ErCoeO_CXgDTDzvB|CdyqR>2R z^Oh~9q|%52G(82uLCC)dd_)Rc32gWg5GL2{YKcfj7GQH@jzuR}l$9Yvy~9;eY)@i? zy^_=GarLR<1T4BLaJlYx*~SHv^{T3fsEr3P!`@En45&AgCm39kU^~=&t-1LebFuoS z<&Kdt10%~WQVbeoRyG4uLtugmW4icEs3N zrcDk6(U$;?oSNgxbBn82$ckTcSA|CeJUt?98KmD0bYyS7y*|;L$nU^*oOf44#FzDV z+i0ad2Np%qJs?5_Iz3Km2VR?Mi>Qt%f;zo&1FrAZqqNgF?t@M1mP=W?m%`-Jw~D!%(pR#fhQ_*GVU)UA9yf(STM84rb5`S$Z8J zDc%b)lDgvk*n>NXQ z&eHpFSZ3)i=Q?KJCSRRX0Aq(<^>FlE3LpAV=H-Nf{kfI4IiQk=e8%2Ig9i}12y74+ zWK@%5BTEY3P+V{rDwd(}SsD6~<-inqy&W|~Z&Osd1k9`3ZiEf9#jS{Z_OQ$-7<1BrgP{p>K$FPrlN?> zKYzcCR~Z;(2O(p$TF4c~YqC}E<*L{;I_?HDlXy3ohrr|)u|ss$7xUZAy;KrWjs@e}cA_T`q~9BK*W!>(7%Elt;F$X;e0?nrC6Ikz zuM-`iYY0xj#I<<8{vLJ7^xD{4+;?-9)Ox?SwuMP&Q&Y zG%d}S>3nUwj66O_*%3o_nF@q*D0wg>Xkw%XPOuk&+a9oM9-_vX(NB{mFmYz!$!Dh{mQB6bxDq9wZEXp41 zABLH$Y|Xf9vNmHLr?vhX*@ApP;6BS&U~Q9pgNS{gS`P)9_9O*J?ZB$}QG zll+&7_Pz1K&Qfz3N2NST#k2DJBkCCGXs+=OhvoRqJu?ml>B$Jv@uKjnA;*god0G10 zkkY3@imcs5o^jdOR(5H6nz|xle>U1!3I$Jxz@{(myoa3z z#wW_=8ES~g!}_e0a2z_G2}d~Tq*#+>Kq`ocli5d*WwdVT58x<5Y(`ouO-{Le6`!Mq zh{{MN8}5Pl4A1o%_aTt-N);I-G|yX)Ci@@?qUQO~pl+2|8McT+lwY8ph$@fXZX=jq zbcP=TM%N4B;*6tSyN_Gxzer6HNqJBU5+rI~3=KYz8v$66zJED#Y_VltvOW*T3|z*< zzblEJm%@XX>^o7>Zz{VIyi6?-(ZJtJjlZxUN?r~L(qpg|QR=;~P)9^7VZa_oW&65b 
z=?yNTRct}=Dm82<#ajv=;nfhZUxL*ZX`#sW>gF}dkLWMVBo+%H{ohaVkupWA<)gi?j7^J@u^y4(>c3=iG6=idFnTho#)kF+; zWga3$qwvki$7#rXgG-9$E$WGQS?R~I4AS2UI{M^nSays_wfb$!j%X8Po&gA=-wrg= zw2P$d5#R8Bv|_1ZG$MBE(Vneh87` z-D{N2nVEw5fQlkgeD(zrLi`7TzlPwYM;yKAA5!ogMn6H&i!S$)_hBDgpU7*o^_uyJ zQa6w)nk6tu|0w9Um4KzIf_|@cY=PEYk#!!jmt1Ra`;hcDd`#W<82vKQ9lnfqVX*9T-U>wcmsmWfM5jS+=qVy$-zN7f{^D&yEp;Rni(_~@Bx ziiHsWL*S9}rJTX|Jts&0Na_6fB>xy>BzyS<>C`ObD-9l%VSb|Yh_DCr z5JJOGVL&}$zI!l-Ra|oDnpA+0C`$$NGu1@Ye?VX|?1!T4=l*fJhnMzrvJeC4~6D z0*}U!jMT{fDZA0;{hPu!kA6$R2#>p;+`og1mQc_kAX^`A-D3Ws^oW=>pih7#dj1Iy zdXZCIue;7Z^Gx$ERYl|l1N&ZqqU+yqvB;7+7mIT;3A4BE`9G?P$`1TG_Y8=l|3Va0 zJ7u*umW=;T9TBBV_M=-01xK7A0z@jmg?u@G$!>EM`DygQH0e#~K_L68U~AD*sTQ!g z$ki0R-lD|F5K)4p ztE$pDi0Q)Q9}oU<@(e)_QDLfB*p7a$x;a5TLwW~?cM|nz6rKnnqd6Z)rtTGYn3Gf! zkxbK%Lj>6;gU$G5_t%rNiLuH4)-Bkv{}gpZj5G`@xdcYrsjy)dRCdC7LzaS1RLp6r z%*(G0NKQUV51VRkd9>abR^)R??HKluD%vYY2}Q+LAPk z?L_4H&eD=BZRLfzjVdE%;q@4l_AWytwYP=VmsE1-uN=79HMdjOdP^!tAo}(|qy6a* zp=w>W)>@u#nL8*xqK4M*7%B&%B?k){Y`!xG{Wo$Lc5Sy?b+k!s5e+sE;cN33DI&FJ zLd)U^701ro!L5GaU z>Bg{V!9&;( zP=obZ?QZi@&gnFFRB1$78kUzUlJ4{2Mp|v!I%YY$shY2|)CW!*gr#3>r7F zwOYv)aQv>UB{W6V-%V~CQGrNP`Ml{L`XQRMiUFKJ0Nq!_N^FdKO==L<_Z8j!WcHN%dG5q?bXzxxD%U+e$NY zM%HTPWWKBH7}_Y-W~#DG8M~E^7iYvet=8GmU(2?>RxFX9D#pQ3RS+t~*IvKpbg5FB zmeKsG1|cF@^yxo!9BO8uIaM@13)Ngxwg;b-ft2x5zG5y`dk~Ajlu?m>>{$qUaiO(L91)rEX2&JEh5LXsmoV` z?X;dAi>@7Tg%NibBJNJLY$)P_K{}3_4u|hJHx64U>{dZU1j!aHoVh~zCQE$(m-180V_&PUkNXk z%cYrmscxF8i|CQ`cQda03y-4co)Dqzm%^{wlrbXd!8LR0Sa)9=0VMAM885C^intNZhC)R;+(AJzgXr{Zt^$QU4P`ZuiXy6^**HiD@%ugA)(vFJ1?Fb86~Ccm3I^$yg6<03ow5c& zP60Oul)b(J*WFLh0auE1x(+(4Ml)t{fyZgyq0XNJU z(L-r2Q}TvhIT)lH&^6Jy1=?MOuQ$lM^0}ztfp=9Ya^_+ z55at_2O>`jLF?l%K?33}iDXPeo8L z)Y7XBGRtG_@@XoMDC>FvAIV);w(=-^I)p(-*)EJ5JwrtiF(O;d1B38q0*-uuL%_0w zQ)gM4aL-bDL>C3`#D@zJgg@KE+e`cQFBo<($_YLSkI2N_oqRzdh5YA&k4#{CZoXR2 znYCrilapR`IZyd{YKdsE@cc#FXw9OidOlS62)0GuD3xW(=?l~m@ewjTIb;z2Lco#1 zNOMT0jmbP)?EJu)!7oxpM3xzJB_=VnyciaIYF9NbRb-4>5?2s&;i@#P@mwb&}f>~b4}lMQr&zCzg%lX2|rj6xX_MX!ViZ^pj= 
zYMp!5y-FPs<2ph3<<(mP*{=pWD9OvEv3j9mUZe1cM37CtmO{a6Az)=yvgbW(?Zs8u zQN@ldxm|Q$r>5$L$bKi-h`VwglN@_3I~NyZ zm9=@7;v-6j;3%gag7kNT?mjZB*5*A5Uf)ON-DJKO%-|!BH|_H8_bGW}AK7u}ct0FL z+~=xc^8qz%DDD$N{0D(YuBX|w471D5!1cQMkUAoAy=Ww_V90BdiBBzqq3KNvbE|VKZEevhTQOM<@gD^q?O9Gx)FaiVONCmsUY%%|kEv{% z{MLx_AvMb}i=*@7aH2wB3tL%KT#)JKw$uC*s*9))WD4sLMb#(0Dm8&~TuxK}l&T`W zQ|ifjYv+YT)2CrVuWYZr*j|<$5Ejg5)DtnX-MgDfy?1G0RDBjIROWlL{VHRa?)N!0 zMO5bdPxm8;me0cyBp{h8SudF{sA5A2h!Enx2t4ut-Ev3{8J7CUmsAk(RrFrIEWIL$ zmM_ENwwse3gNwTw<|`^VQ|u!mbWmZix0Cu+P=nfnY%C!W!hB818>%f>3I$(>fNk0K zau=uX>6Mh2BKR%!L{w?}7r26<;oC4EzS_)A z{_Ggk5zTkhF(f1NJq?+hQaO}-7ZRjDbcDNe!sMt%S=swNwM1ko{%*c7e<8cP5k=AW zAwnBN>My7-(D4IxM06(w6(0{E`G+8*&5GKcY(m}c%KW**?pWO)sUad=B&vrg5>-Ej zidnEbSA4QE>$lYSiJBr}nBPT%@5^8GV3hq7G9<$J&dOoa-nW0%{7fAYNrkQ5x#d+i zD^Mwf{~U1p-8qgabJ65{-S%qN{6ZZuZ_JLUy3d{@(eq1q5Q%M}(_5haE7e3q;!KI9 z97=u-3G!Swnp3RcWT4-uA)=zlJ2m#oMlxiO{#(%T9nb~PS;08l?-adRem){IOmrde zeqw*`11*`Q3=>xErtyDJ^7`6kN+A1>V56$8IT`XUH2hEkn?I>!Y;^Dy!MCGVA&|~L z!-2wTB*OE6$!?WH$v+`M358)3o_T})8sy}mar3YBXcD6?2@W;?hDOV89!&Nh#jm&g z#_gp37gUUd1(V9BcqFq-nlb-V!I1oK_?&WuQFp}6M286Fib;Fk*Hz@F(WmD0DPTz< z|El1lap`_bmJge&DLo?3=tCC<*++sc|Bc>RcgtDk>MD)s)Q#Gv-R-Qq3f z$3PG?hRPKjxp^byZ>TXu2=T`P?<=p^n7vY!!; zr%N+GJu_`iP)$S~xleY{L(z0%;(6+__Pi$DhS&!FBo(dyc>)eCC&O|=5bf-ckSsE7 zPL%VLb#sc!3i4Z{YBz2bLn6kdh}516ZP5CgEy%8&vY19?5s@vMm@I{Y8$*ES^8JaJ ztiQ9m^PIDq=bR;fH#bpLVf1r(2%pPeq=?j>4lT-BWL+E8HRBW=h>OI`~0Vig%NFb1>O_;ggy=(x4FlX_cFQ6sRKT5U!)p}L)7 zBl?3gnIRBF-yUeZ52@|XG^;y`HOvaUgTf?;2>#nThsd ztI%1J3HUkL<*g~}`b!ma?s|gZVf?QCVgQ-JI1j-nVMI$SX{XVAlAXdAn-Ca}`F~`QQ zR_@QD?*jPHe6{&zS=NX#&%IDR5uJsZJlA8W*ak%}YcDP?wpY6CE;V`8Y$R(BQB;jV zg=Fbx`PBTF32gCpR8By7vM8ndCbPKSt*f6@>yS;r^!}0S>MfP_DcF(ndLtPJmOGcr%6=@$EU9-) zeq}`F(ax5M%)j$*2Y+`QI%eQNz16M$aNVRA zt0E%p_E|V$F*MX*VC=qZQHH*@=A_b4FUsnpLfJ0YudAyt`g?aBgwNwIQbcMS(Ao#X z*4d5bG)wPUl||IivY&!9BeIGcXIyvDs3&Ar+806-3na5|cNT zLHr)zdqreLjvr!aGOzrIWR@u+J%)+}D0=Zx1_fIR-$;C{+QlAwfyJBWUQ^OS#A4>U z*{AFw6_z2pO?eQ5zTZ3CSiyvBuUXw`+UkhNMT6!81`8~L^h-fU+r;M3vKR&{8xANu 
z;>)J<0m~qL(TBaatPumHOY6b+S<>i7Ntb~~Z6w$!G1#N6qne1?h(|C9_={cP!4Z6- z6)0WPye<1}DPac#mYg}WL6+?p;tW|eGh~W1)M8C3)Sgq^C0y^#N=%82?pU02E5nADal;! zIhm-%DDx0?L`0cPF%hDudMH%L(_K6CVjE)~rk04(G*f>mS_%aZhX6w|u2UFCT60VC zN2p>*vEk=-^yyLdAX**?3qFSp3*KbjJW34_pCj`SWDx#nz;%qVHomiF9;4`pF1SQ_ zgxg7dY(SM&^b2ic9=AcNy`9v@gNiJGrVE`cVg+QJnNFPZ#O}sE$tI6!wrVRd0$ksY9Qqz#07@uu=P$KI0p;G^1 z=zY?bDZz`KedZ-E5Z&uTbSh@|z=KVF2PA46d<|Gz2pQuS5ufp&&Vjs8%Ff z^f~!E{@?W5pT0%4+vPV$M7I#c7a0B^MP?A*iXfaW;K8Ffc{_;nHVsEqw2wH_+JYI3 zw<8!RYkKjLJ0844r4h}Jz7bP5J$NS+i26F43_Dzycc~$wG|SE;1A|ZSZs1XUWUXmu z4eL?OdlViKEfZBH3n2NuAO~4Or8-@oH1AXNhOz`gh<`uuZ1LG~Zs{~YD3Gm5JZMbMG-ai{`Gn#l%FZDExckZdB)I4>?`@$` zE=`xD2fH}Ud7+>gphq}C0U=$1mkmmxr@==P3sdm`*8)fzZE?JFwU z7V&rkw^(pE8ovr7Q#AXLX3A23VF~;-b;YFUbSIQ@DET@hh@QTJi|Itp9L$eYw4u_O5aNFfy#0P$ zFtXUW+~uD?QAI?{r%(Q=Bzk@d4^p|tE0?8DaAm^JR2GpM6wxPuYlvJAciH?# z*|!@V>4ISdyRhxIU?D=I)v_0ro2HR1Nt@l(!*bB-@01NRUEww|LpuTqg8z zzxaQsXq)`Th`dw*Od)#J!_oIo@3UWTvbEeYbE_TGz)l`On!&T%T z?b4fwucLszIi}#~yDEHm9wcO0V!yi3SzDF0`d3p`L}8Q21bPq(j)VY%Mj9UXK>514 zx=JE?KlJ{-&y%;0a}C&fDX1*9E?dnzN>v+4LCWDH9SsSx08Th4RExVX!}ppoh9UC} z9EXN$!GOlCtzk8qlQL!N+A4_Xf=^ejh#>nqV4s-yAa?shZjd54YEN`dms4E8m)y<96JEW=;u{6&EsXZ22#C6vNU8q$GyX9MVK*(|EI1vt1_5GBO71`$=hi;_VR?GlT=!l z-yZS((lCC<^bb=82Ipi1NAmN*gGfS=tP+)HbBYFOXIzlNp&|`Z_(L74>RA}9QxU8_ z1^3*ZJ;t1-!PquB7+O@*uUEF<29CZP!-r;qEqBXvOM4dPSZd!yWf2i9Q)>4>)SM2@ zk)n}m>MnCrMX#@7yNj$dz`7}d?6wEUPOhK4q%<=l3%YKmqUq5u%;WgJ{KY^vgK~2O z1##Nv92nZ=IUBMdq|qoB{YLM%(12_o9S{#=K>Wo3GJ|nT1j7$(Na-&9h2~ajjOam5 zl%$#2Nq>w@~XLmdLm+b;&mlK6y^NWpn$g9eT=a+yh$w)FU{Y~ zXy7j(>uPhHd*?Kjzj^fM2qKTWpWJ(b8;)Al zEBP52MrxQjrALH5T?d$GdJZjnU_oZ#^EuAOGV`j4=(`CXfEgVTq%VLT6mGIDVnp=H`GpBI-+eHHMOs4UGWA3}P@hr39*QQD ze4v6(Vl!P8ZRCM^AZk{<2D9MiyE%7g@#b^Pnrb4dYVK}Eg*1ttgYck(%%*tmYEY<_ z&E;x|Xjvqayl0Sq2z*4f%$zFD4%RafPe{H1|?X zLZ2JXSG*q2Zz4fadJtoE-OxzCZIYHAEzYL`zs1#6KK(6n?e= z-z?R!ko^(sq9ax?DTfTwAL;2XYUU@gy~m@J9`RwbF_RGD9}WBo5_sQfhk*p1?NyZi zK-n1OpLlXg!+Q&kB9NwAa<~iAXa!fiNG2SfuY&7KL7nMd-{E4tHuOE!Q$HRAu 
z_}WWz`&ylq9JcLFz0QAtETmMM_kCg2o26$01Pkvd>izF5sXP4Vk% z>UaRjPxnEWA_s%dvT-)fuY88G*H=O9AxM8F=!kDF72%x1Mz3o0EVV=o_-2z+l1AaP zA@oB>HcRT}T8FSnkE|P>;W4nx=boeL?Gn5Z?a!6N7Qsk|A)_H=2IIL12I{rQemg6i zOYuB4MtrVBwrv3fKObNeXm%K-;EyPIfx;uQZF?^tCW)cqg-{@_**vi!3rMQwMaqwe z52*rE`c?%&_KTCj$Cto=<|T^XNbo&|ikCuxw)>K7OOsn^o0lm&qNjm3@y)o~$$UAO zcqJ|xS9VDU`YV(hQI2IJFBpWs67ZlAH*L>@QTm2T8bXMFHSnwgNh#bq)S6$Dlf@<1 zea(6ViDI}1q2RR;oGdTL7iC;aSFrU@PDT%9Ul`e@u^}BfZ&Kg3(NF0K z^!bYrN8y_xL~WlNfKbWqbZn~gmi6d*_fy!r^f>C?3LOeEAJMC4Vn-xvA${x0rXRc6#*tT+g)jty>tFccz|pEI(c^H_W?KyP>DG6bjxAflqr< zW$=?F-lK+y#GOccJqQKwg#ghbbuP$kt!&<>l87o!YBfUdTo57A^nRFvEU+#+p_&h< zVnbOV7=(Wi@F3dBVm~ZR`H*gbBiKz7Y`xCiD4@TKX zAqyLwGPpG^^Qbd!-cqNr`%s?u34VG{U~94yvaU~T6MY92EE%lB_s3ZHTiklHUoEB|F? zDeY~|t*)6?Q~DHIW?7cHugH#dU(z7#l3PX;$f)s#Gm;pX^bh-K7_cuRU`L8YR@$$c zuc$bp7Mxmw4gir~1rpK6Hk!tZlDrz`Yl@DD{h8*Lg%JOB;Qb_-6*+;beaL)6;Su3Z zWZ4!#@HYWQ&fvOp*a$W)^DTu(ggntSkS)g?go1B-0n@Ey8r7Nn#j{289TjXS>#-CX zz6%5Tg%M{kyfoEl%rv?G>-SU=(fjCcXI%0Z%A)G~P~j_ca9~GXb_kK-haaeAW5W-Q zL&FbY;5eUYE#Rz`A1OSdKug8WdCMU9#{lC)yLyzY?!#&#^AqJqtS%;ja`P z5%TP-A%yr}r|{CWsg;a(F_B|2^>NF-fedpI>NhZEk6v zY5t&|h(aqwFr9@zSo2J3{|GHANQs3_a`u5t%m0%)BbrfqKZSaQWm2Tk_Gj2`A-1{h z;pO@L(mBJwH0@vKn{uY8O!1Y2|Lj@xf6)NcLW6=M4|2xgdZVMdDo2v?yI5WGEF;qo3BZ_>}&l zKbV2IDguETFK&>!W_$IpOlFsd)xVmnslPbd4-Cq8`Uf(pJ`!r*%w3v0513dgHUh{2wd8f-fPZHGe;>sBC-Z)(*sC88f5ez>AW?!9H}rfYp$t=ZKL0p0{Fu8 zs)wWRTJT{e6dqTmPq&xNwbc?anI-X-ct5e%0Ty3Co>QI=C!X$Vcil`%J_8fB*I*x@S=qo9W%iPA}6d&;^`qXcPLHO~2W73t~ z_g9VrEmrEKYQ>zOf{5^^cK%fg6(@Ry+shMe|7I<})10J=hzS|}HUf1ZT26)qA3e0l zdZnTqL3)acBAOC|M3zO@sc>N&GQA|LRw_)%7K*YRchQ`t(xUw4fNDm>s;q!X`Hdk* z+2iWFc75PY)E1FBX5#}P#GelQu=4ic+#0Bn0BooMVN5PeIa(aqo6oWCpe(e|Mx4x7A{IwHDn zdIXjWy~`eu%3DK;WbIe)$h3a$wKx)D+PX zI&2=4z-YTYZ1_SpTeh>*?x2>4d@z%rlRR`wrnxFS+ynp?&`|IV1IpxRZE z%OGq)5RjY>v<_e1F$=i^moLgO7-p+FBO-^tmrv?1EQpe`AVG$lXtmAE)+USQY!zL( zHXv!VodX;CUZmQw6DMY$tBTy{_ZQSva5IVLfryu6V~xzuv@!0E3XaHgGBM6$s5l>r zUZ+cCc6v;DlJBH~h^#y#un^+!4Ezar0vQySDT-_D#ntxmbW5U-?K!@Snj)5BWO|M> 
zJW4Ns6gdwnXc*gVx-QQPRTfd9NYw5E3KiR+@YP2>_t@lB5Rnxm1j-a2@(+( z%RVOZv(hO{FgI*3$iKmZZoZxh*%foGXaDA3ixPn z`(u&{veE)mD`rgRkloFus;Z*=-hdjVFA_Y)#yfWoQ-(?P4AjBAw%(3+7ppPi7516e zrX+f5@SKQ8Y%g~^Ys(8ccOGlLF72~YeY>fvuNd)!o{~&jDJIPgnAtdxbD918=VrZL zKDgD)sxzX{OKhi^1W~kOplFNP`F|s_lMT?{88!;X^5YX`w;CeiVm4iY!Kb(ca2p|X z88PPZ)y-Yi5RpM()Ch}^`F;F_tRp6hqPsy94*y6;$4W=e$B=V#)D)5J`4JzQ{Drcp zx(8G!G=etUWWh95Go(}+-b$Oa$7kOY0@tflmG-Ud9ye{~6u)J3gihoR7QmqI0U5E^ zKN$wL>UNZDUIh`gY=3{^HT7VWEkG9J*|K_ygY_*HZ79!n7%KKc5p-xsysMdg%HL?* zwbkqWfCt4}K3F@`R!2na&8Ck4#pk#bs@|X$c55^TR1wh!oTZWt$;9wBG0(hV1jkavIuw{Lj>aJw-Cdp+G41$29?pl>tn=@84 zT~$WJV0SB1N&-W}Dhx;@s77)ffpl1`wal6-BC76*DrgTv!9fVnGLSsMuO+RT%M~Bd z9TDUvN+A0X*myni8#`sWr=0UOE7J`RD?B3R2Jjw&^eaG@fsi3n^77*|h3V$xxm&iF zd#SV{w;+ES(#nmRlh=bYIQK?ydTF#QU32C>Dvfy3Od17(==%cANMMgR%1_9|WSK~L zKgAE3QsAp;eR>-mhmQNh(d+Zzsyg!k6-49%nedZA_yYlFt;j8BLf55C3cxDj2dN{f z=9gIbA`?Y=B$^%!6JM395=`(ca`O6Bf} zs(PM^ViGh;75mEYH|flp5C-Yb2OXaT1BWxRR8b0^Qdx?f7pNfOlO#qR0}2%{go0TT zmJ`~umnG+WkqRQdNoq}^hmikb@X>VbWv|lpD%}_b^AdGLB%J=at3%QBQkW3A{HNG8 zTd$dysboW+0u0h$4*Cf~UunyhWUbs(X>v-Y7dCz8hIxgGBF3j2gkOCCkk(hiiXMig z?!kGpnk#Q_R>o>&nH2si)kRc1+}+frN%XuL9=BCY73D?Qyha6Qiho363F^7_c2Zvp zDk_yOYeq}|b&8FsRAv*qAc%gwM`O$E++0omG;dIJM75vy@rCpVB)<{lV%;eY=yTj+jdOZ0h_m}Dx#8^sV?_G)Vv)U^kMo6gB|WK)uj@GnrPj;Lv`cw zYa`;9r|`-AMH)%*JN5ZEm}9?T1FIe1rNWr!kbC>fDn2N^vL7%BxqU zCi)Q-l}C#L6eC4&okjDxK8jETEox~8n2)J4BJ<09DC~bt^v8itMZU4T)OmNyCVQVy z$A%-HXHoV^$c~dY9ETT_p< zFrQhE(`LfHg2fF9>;Xf{qw}+H_8WAOg&cA|waoPToLY;c34Z3(H&7X%k zs1(<0)3U7Q3u=p~fMv@`LWut&@aR);g>ZRGg}ZruNd*zzd|83RQ1N9bm_N$D-9FJ95KyT2wtoQk>1CrIiCN!K ze#HC9et1HN|E_@6_<~jrpD#TTEk1zxp8WVe=KHX)1i@T!Y$IMjyu3Phs3=<>|3E`9 zB#ZJ5N`E_}gTGJ~1M)-RaV`APTzhd(=TNTb7Tp!3yLi7CJI4G-oe^u{J%>(zk%dzJ zW5^jxWXx2CO0DiEs*A|!6QiXbLHJJrM`u8xv%I&x&+ep=!~bO0b8GsUnqoS~h_U-< z*er1%nEs!`kCffrXJ<~;C*6J?R{jf>M>KjogKyPeB!QIv5>hlqus3mwU+-7y*(^UF zRap%8Hs<}r{u)@k%kGk0Lsy@c;lb9j?8aSd?Op4(7R+x{Qyi`F82dPbm{k83YIINA zjtTqjey6gSjASi!>?Rq6{~mBOSTqUBF@sWlwU$4qB_i()2z;vGT8VW25l%#U*DSZL 
zKdCDs=jzj-?>*C>!9QB~Yil{n{)@6Bs)ceF!|(5AnEwjqsmb@Wy>)m`M>a+($tq!E z)>c-{-_#S)H5$C1^!v(?8HT?j4CqDh8ChwWQ+Vsr^Mem&{-OGa&*dR>`->Ej+J8ce z+PvQ>w$_?kuDvZdB{p}R`3uRIDZ>Z1*jYKMWc&@Z$k5Z6~X1T>vr}(huKPrxR z(H=&zzZgJfF#d~RV2~q`zwtA#bQU=+dtAyZS2+An!!qkaK4f@pes~@_z`;-*A?wjt zarf_TO1?wO*e`Jv`A54>DPppx2PD2kK%?%e(4p!c#M2!ZfVIM_sW74i9w6upu6syY zkAxL*JouHJ?d65eC`Lclj#81m_^yr@E$UB0ir7(Bm%x8gwt;A zCfDmY!`@8$1lLu1L{`wJ2Qy&Nbv?LF7FX~M^QOJJe@V85owv^Gt21JLL%>PLg%p?m z8^Dj~zua0i^SKKCWDUxyZ>Z{s4`vax_N7sH41{Py`W&j}rfJD;6v!nHn;WS!BFT9Q zt^Oj7r1)5fgCTZo_bIDS&2g%W7y|3lRpU5x91jQTFc+`2)?^pMrIppg<^&Z))L|~; zjeMGbLHLP)BV`4jxG}fKeyEdF5>d$v0O>g5fi#{B<0)cH6cCk8-Ih_-e2SXO?yGLR zTu7oBhEoxS(~?g;YY&VsE@SM|hT^pKgks3^XVDDBjWra=h-^;N&DpXfNG>I9m1ca;S9&=2uGND3euI(Sw#)q{=T`X24SPACyk`|42aRl3Etgow`F!pqa24z z-AuK`(Xk+4q~l+}r1$3VA}{u7+g07LjnpmF8PV_W@2AjT^k9_T5;C+BP*23RGWGnf zqPE#@rMhjSAJG%2^A{nG!dpX_daF`n>zGv=t8b&qEB96dCcU?X7g>p~B1zAEYi`L( zZ>Q3TJl_NOoca0J+xam50&`l)qUubjkR=9jxVU_BOL$aR6Q6OQs&PTXV z0Qj_q)0bs-dRJe{oir#B1%RjUjrfZ+lHxl<%t|qLs%8J0Z{xSuX9;Rs zcywI=7xD`kmsp!$brt&y)e@0kxI6h=A%*;H;M=CY>x-|TN4_$*&*W7SQKratrM4^I zy}lHTt}(bymuK;P50%bzXQ92fJEXvkY_m40zKA`w0}747b%ILw6x^vCLwYS+ zrnNlps);3)U%4D3iKYD_*nQ;zefbs4%fzu1__*6uyiH!th^)$g&*aAqa1@pyM2d7r z;IvSx;&^l!O`29+M2hTloKC=^s{$8l;lAc1_2k~xffF*4C6T&is;Z4>sd^Nj(qHuD zG6*vW0$TbqkA}fB7ppCzr60_`;oW4`z(hBU%s-i3?#dD~sd}{*sxlM8?n+QsMMQSs zC$S8G(b#|yAFa^o9B8+4_J2iIh-Ot5@zLh({d{>@7F|2w!n8HN8NGdNJ5>|W;PtoC z<1czI5SvGF7y@I2_9=ENK4KUmF|^408S+bjMg7!1CAvt<9p4*!R~1B5HhKU)LGMy| z6y6QO(?u9$Fs>;mr!Fmbr8m^&Aa_@LRepU$LJnxu2iJY+49h(b7DP&)4M@*O-A7G% zN2aMk*l7QEib(A}p~YN`OIKR^Tnd|0)7H@s%lr74Jp##lKt`F+d!X^T)w!VPn^#Xn zoyAkA@)wm(5=rp_#HrDY+2uW0E?Vx$VZt`Twp1R`$sT;c>=#Xu8HT+G!_DR4gF9!W zxh{=n>0CE0TMh6P>3tfO`sf!P5Yu59XkeK3BTV?z`+B2?Z8b-HYTid@k3jOJAR~K| zJa9}#uMWs#ma6tWA5cR~zT+kYW;ygM!V?y@GUu`A+a+d6P1{DlMIVC_(J91HxC|lM zeg4;{+EPZC`Td7$E$OrCqUC2g>W!${cu>OIhf4hl^r#UZS}@2B?Hj#JeGxTccc;|_ z6!H!DscKtQ_5_#BmoK-juDT+gZAi>aQ0ZQU8@2J}Wjh*Hskj!mSyO35iRf?V6Z#8f 
zQFRciR1MK(#6AUHuF5M{Lrh|6KLk6nBRQ^YHHSZ%uF8>X$i;;8EXw%$VGTe;`<)~{ z@GvIrSHOOIu_ron6D`{5wzMOc_f#dQ|Xn9`3WlB_lFy~ zr9UtYV+n4g^8sp&i2D5wOzW}edLUe9h%52=$Jb>1S&E=mqD%il>W(OdMtt zHs7>$51Y^*r6Jfe`h}<23`+Wk!-Wj&qY+q4wD+B@(yecU^B4_I!~|-8H-q6XJc^>n zLWGp&^MSFs1<6z}VbVNKRS|7Ge?L|Jq6eew@m`i_n9HYF?U*O1EFxJCY?zZp*AwBw z_^qF5g$Y}z3ftHCB-KT!82&}7fB$c zFNYLOV(j1MUg#@S6wxH+eYErlB)<}5&hQCG_s7r)G3UqZUZtj(I1_SgtOCvv_{ z&5;Qe7;1f%kzrE(dZ*DW zHkNkf*_9#RGBYzXGcz+YGcz;(&N(x4-`lrO+AF8OKU!-P&H2pC8@O|akm2!35w-^P zmqNRpLQfX9wISNo>-o(Uc=3A~S=VIXyub;+2>&=!9&#HANpGuOxlGMk@kBh}1?HYnX9NunRN26P*_o^x@8Sh{+RNzGm zxnK!K_d+MT8s2FMxNNS5QCZR7MLJ#o(@P6e#WdsHM!faps+Obb>*hd*D-}Y#hX_vJ zN?igrbjP~%UIRJ$=#oIJNkxfeK@s0aBB$!GyRrI&w$p$OU)tLe-f!5Alk-=oh*|_k zMTPzV(VVF4>P$@q?9#=RnlYyzG^As5~`njiWIkAE5LDM>?_E7^nzHHVIIR`e42nzZ!LiY#s z+*W(@)>N_|H(8d-rhsWa#=P1wG5q3mR z$ZrwJZ2;YNGSk0p6vs9I#8QTGY-xUnniU`L#w9*Yn1|}`8pAQ4U12;MQ5E}pWU~=# z0)}_!^}WLP4d`gZIjVA&1JV)V2Sji{DJ6{621iYcy>ve`pvU?vMrbit5LOC)#1zmJ zF8v{z3Wo0d`ebZ?Kej}ii$9*w97o+dV&xfBH^Tf%_vF-Vk zko3pwTp-i|U=Nr8VsaAkIce{I~2lZ&+^QvH-=QIUT`G7anU5?^*nMD}lu?U+|)8G&i zKJ#}YJFMQUF;=4rK|%jPD63Q1*PhQVZOC10_@}WQK8h?AKW*v2^_Z@b@GmBTjcIXD zlWT8S4{2=K!wtarDSqyM8{RQXEy*YarSPb%=>H)d{~Pt|rxTxS)Ap9t3 z(%?%jZj5Nf9x$sKSF|KJdYY`JB;)`*-L=w6$d#B74lTJxvs)|{m#%Ddhk2JJM9+Z{ zJt40`Bs-scfz|qcdt2HZ0edW5)tHWkm^H-Afl)z0_aT(6pbvLg_JeQ6{iggme(ZgX z>u7{!8L`X17||7eKjLYzmpaSMTO)UW<2uT?6kpgJ7rR89zg${yI?@p{yhZx`CAjksBe-1=nQBkoDC610FI5iumO;4S_gcAkV zHH>4Oh7pA*DJUn`qcpqxa_6+Ag9mhacChO(BRRVKtRq?u$gms{75s3*$BVHw#RY;R zjC$FMQEI{+N!+;Q(S~0Yc^zPV3`;fB;ra%4bb7hUj@91_Iu%_d;i#PBnZ@|BPy^%8 zspr96jjw&Qf!E=KOz4VCKx7LQa^f9B9=oKp4-#!>WXb*phII50HG3&5dSc#?OpYt1 zj<~!4hJ#}_GOWY8D#<9XrSPb%=r<;v^Mv6Xy|-zea1#SM<_RgLU}Y8Q-<0~5c|uwP z)Od2NkuKXjp`$vL$h0ud{m*ojs0knMHO>gP(!F zoKSPKkjgg)YE*@95YOUHw@O^t(+`+>#%v#FWJd+AxtXG(C+6{F(o*Oy%yO?(szYlG zb?eEQ%u$ue94LD9wM6U-nO--a>p8*X9VbW_P0|)$DL;|2bi3w5jkuYN+f!m$on(Y{ zlQWPNVk>9`6+~H1z>_nu)F^oZECO$3K*vsrl7g6}uqldp3YoOebKgsI;mpo_vRQ9! 
zP)Ci(5+dfnh@OzQsmW$h`)_N+%a%=36!Ugu(&amyZe9qtH;%*Qi&)B3jxEhQP?H00 zscI7(V#okmvGmf40C z@;=TiB$vkN#=I=H%&4f~>j~%Q3c0`7U?|7uLM)YIjxEiN)GT{fFUEoUSxn`Y(y9A! zhA|wrcI`?^RZ`$H38u$*S8FFObkV@J$xse^Q}L9cLP5%BQJFnd>WjDVmLcoS&o+j` z=Bw$=N7TeSC-XQcaZUyU2}TQND=_uWHQqWTX~MXb6_l+UD9Z_W9)WcF=h9A#q~+t! zDA2Ik(2kXdtR-{~)C(&STbKx%U^$t^?I!8qPB`EA4m%~wjAVUGQ|Jqb9^3MQqooUB zt06DTmKREiw2dTI(o6YWJF27?8p2UY6;D|z6r_9+m3e49qHTxh+{)j^VF4V7jmWM5Fz)Ls$-PKrUOwP{=Jw6sc z3svRqZZ+(G4 z8#Zs;peEWewxjr0U}r%QXGpvni$Aud*%l5e@^arc`n_Kn`ds*yx(^FO#iL7*ur?mzh`>U|SsAo*BH!EazL{;oS zww~EYld5g4bogyxOvm_GJ3q@(0u2eGB~tDKZ;&+v&w)`vL3a~+2%t2TVgut#>Wg?oDfS9` zjOv)NE$)d?MO{Ww*h`6J_b@~M$b4gbB|H3|fgSdGk`Z@VNs;eGGS6s?CvO9@9lMHq z8{E-1O~`d2s|4JK3D8y-uJXlF9yHQ^=y<3^-SK@b37e5x4z4R$P$LI|R0<=%8BhSQfd595l+JB0q>^&azrd!~PkY&f(cd{u{9mHtah5$%KzUg;{)3G3(?6d`Jei z#U(9?;DLderJin&4iCp~?;RdNnw)D>dxo7> z2(3Ngk(L6-D4F$=i_KI-eiW4MbVU{|c2ParWF0=ciY($w@G%tRu-@1GJN?kW3o_xc zrtEMwT1*w1g`gwC<4B-igs%yp+iYSVHwJ-bo43m8O0G_Lyzw0U@W`tK^@0LFA!lu7 zzOmHXfkRteFrR2B#~ckYm8r6Vw4X$64lH<_E)4tq`9Y(L&Fpv!rZ=BSJ)Pa}oq?5??Pdk$-N z>EG~lV>o7@IkNJU1L_;Qp@dI3Y6#&orLL_(IJ1wU*Np z^;x9S07|X9H9d{l@t{8wo^3Qo?rQo8QxfMnCyv$1PlClQj{x#kX_IzVGthy9kq%kRxUO*Ck6Dj|T+<^zH=7olFbQm>d zmvqEmtFQdlVeAl6IB;!tHC64b**Rz3{Y;iZOi)IU>D z@mZ*(C*sRUWY3XqSH&NqPsCR7+2#U2*vnVLthu4Jq9^7n$YfnyikCP~GIQK2KNES+3BE60zdi!wTr9C$@-UqThj*fSxQ&T)uX3Coldl4c`=$EQ`!dHJe4pE}s!bGvv zb$bI&R%jNq@gx!k&Q2%1)snGpax$_)%zcfVfNvv^E}=nmJ1n%ecbdFOstZfx?Z$I- zDoH@N8Zpt{LE2S8Tg2EImU`ixMsTdfFp}g`Y)QV0lFY!4rT${*He&A{_qvC7o4g|f zNkD`eG11;b8f!aGPBsSf9bB6BUV}L7CdCt?P>}NbsLTa97)xx@jqaeCehlw7f+HI_ zz92bJhza%q!r1LicSF0?p6fJpaXW6_2_IYy%c2Wrp)4lWhsa`|ms=pJG{=xZ_^_cI zeO`_&QVwJ(fj&YIw+D=Tyzo(@IJ%5w>wpL!%LFq$_(S-(5gfiui>i!Rh(Cz&2{LG6 zrV$KTTHU?i0hE1317wp#Iq1& zlAL&7Ay39goPDDCV#LAC?cu8i^UTVNnIJ2Ysfh74GSpu#g&{h%sXd+WbyIg#G8}tj zJY^*P3=L_2gWB@Dr$^Ygo_^*9;hU>bA26o0^%EX)r-szOMRiWjlW!|d8sM(f56xa5 zqs_OC<#1G0bVRNEuB5Fre1~aZ^`7gr7I!po8{2FreAl3k;)@Z*DrrdkdlYAP+0wKU zjsVUs!}m?z(Ossf(jSo#;Rhrf1_FI_yewl8$J6vLFKyqd<7nZB#&dWct3m=-e<{au 
zR97;7#AL9ZTSLIS0u#Z7@MB}|y)B%yMEVIyR|09i3j^e*roI-xn$Tn`PACdU@@F~m zsq*)G0q3mHtA?MOyrU{4}@x+*tFN;Y2HCFu9%?K|y)q4^=6_{zDk8N~_vU7&0&j{<|8EMHb9LQA(W4 z-4q;FI_=!y89sPj2K}i^!sYQ_;`ua(AFQ$isH7vx6-dGVFnt*ex#kYnLpm=QWX(=@ zW?HxnFCQWd;fhAT5Yd+Di%spg)*@`7nOLUw+X)P%>`F`-Kk8u5fc+?@R=jl@^D0`V zSGH6*J}MPrMXiw%>?(w5ssQ&`dw^R}W>FsSIbPK`j;^o9wV8!P*@qO)6;o}V0;Si) zlbn0PzQ%Fnr=~F!QX=g~5{*S%{JS(Iw@29D5RM+U(i0Xkl0Sg*+}q7=3I|J2PzRd& z@%a5R)dIWNxKH)CazCZ$QavcEws4DbT+_aqsXBUZMvHAX$CvEYDNA>9G~0Ufym%OW z#5GLYQ66K~UzQQ#nna9@n_YOR!nKUxs65sOjP?i=664xr@RKgIhI5U1+%>Wou44d) zQK5JeFBGJFa8_>XKVYZZTsXv(Pn`TYV?0S05|TcY(pQB-?o+O7x(<`OQVElWgs(?o zHl>9@mwO|zE)qwyK{(71j;55N$6~2UiF7zgtn2WqVpcZjPKP6m;Hc{*r)-Qa)g!4& z8)dMx5Uy`}j#^$R6vUU{Q5591INI>B@#GC5S`yzH=Lkm|%5m~*(gq3ccZj-5!ZAz& zH-{t#115snGuedC0=+619&TWG$C71@&thCsaH-? zU0fQ>;gX+lbE7!sIAgsXf02{~xE2<2kllmNb;TQn;ASoFMKgD{@Z^xBj1V zPZakyD{@Z?xBZ`UPZIZbD{@Z`xBs7WPZswMe4CVp0*8vSP9EUyY!B|I7OdojaE5^#Q}mj*Dx@O)GpWz+6fY#s&|t&n z++k-oo!m1sVUuwj8OjREP7aji1U!pCI($>z+TF(frL&FWaFo?lN{UGR9IDfqgAOO0d-ZZ(9%h(4aT zY!61YaG_~Cl3zp?q6oIBCD26#aekM!GQbnVtveSR#$mbDY)FbSBG3mT^WEHnTNkn8 zuQRbLpyf4XB}a4U|dx2O9(#-@WORE)9SVNG*K?RM|W3fEjBrhhV7PwbMfcPv`bT2 z#w}Z~t(43&C3HXG%g{EPg&$- zUJ{Y|BGuWdqerAM7_QZV;tm5iCRHh}0HZo$1Txs9(}WAP1QWL~Foa`psF}A#84-pg z9LZeRX4O5zxqgooFy6BoA2eGscEtXKmrlz$NGQnC&eizml$ z|CZK2=%;C-i^e9a|1+ecm1BCGRFuUch?d(dM1{T=(H!}94Q91kqJCx;uTKt|1NZ>Y zliu6dj_FO75jh9OG=;to(WT0Z)Y0j1U&HNP<&82T+>eA~kU8so@DX#_urOjVHFx3| z%l!?z84;GLz^1cIC$^lrQu6?&h9+alZ_J&_;em#B`0AJO8dFG=2a&=~4KJm(u;&Jc zC%WOmMsRd$DWbk$Nkr-op*r_d*^)tba5~P~a6vab)F|ul7t0i3?gLCf919h4;ysK! 
zd5xB)u&T<-^|3!0t8s7};e^n`jp!J6u^N>nV!J59g`}vDAeCP#&nQBN_j+~0BaP?q zWah|%yr=265P0?f!(`N3Rgem?G%f3)Eo9b3(2!MInx5ES(>q;g!sN?6P{&mauK zV-4n69x1kql{BRPIO^{MeT=&8Iqdllk2mGx@aswSq8O_cTbfUxCe6J0{w&rlXB+JR zR}ZQ{c%o@L%)A^sRqatJfu2MV>)VFbV9)w)3o!r_QZjTgJju{~zA;v3=%)D6eF1gp z0*OncT)usw$vT#A#}-+kAmJC~1mnORo_3)L6<%!0js%wu>ZSP7eF=5xfn3}^8;u7H z*0>ZM%Xs0Xrth%6a{O54V`756j4;iz((#J!0ELH%!gObVerA!U4_|Ijhy9folLXCy zaZ$lvk>SPsU5~AGrnh2S=^`HU>V#Js{LIOXrqB_-2v%iDaP-I!tI86RE5?!cux(!}M_ErSoyn?sglkwNuRC?(G8GN)vC zU^nnEynZzti!6?Xa7+VI;=F+zxpsKcMAnqN6K^z(!@FHMZDJ}SyorRguscKJo;GcG zv*|mUcWHWT3etWHwP~SaOc~cO*VVRUg^hG?H{J>x2PW0Kab2b09ZUf$oPE6}930qE_fCU4Dx5_gtq4&{oOh8!!=ij` zH=^|xy1P-N?>3sluqaDJVCjl+(-ihS#IpWn`*We&TWIYL?=_xd%8}!Wl><>okoOV7 zDwyt!V@Ev*?>B^_mZX>>lvSkv0qS!APRkAN&Xt%Stbd1(n)d29m{UNKAInL%nuzV3eI7aaxXC+g6pLN0 z$I=q%6C|<0q(y~`yS)v%eB_e`a#X3B6^Se)%BOOE%AW#L*>=XT~!}N&69##)*0{X9#`RExk_ou_eRNi|6RV=RkLmw1oNzQTqTj zgY#Sg`I?EufilxVOACTZ$ytT7L}GV{%&lC z#ZoL;=mQo(+AM{r(ElKslbB-FtUH+LqW_@j6#i*khjXuxh&_od?3EM+{THEZ5%5<} z_nXmK{cl5gjOM~wodu-)A1ZTto1UxNu9q0Ye@)x509EtqU6c{wa>phDHbQl0_B7%@ z;qv%(JVoUoq^Pl%D$9s)1riQM9^xC!YojTK1q99mF|oX&p&UyHSwpNGkZoszf?kQx zeE`+&^-h=xS2m2p=~vnFj`&i&3RT&~#@T=LgJWF_4|JKjql-0OIcDM`3L=jDCTWVgAF2FM8*wJLb8$axZ^ZQl%zLK$yTbm)bu4Ag zPy_wU_BagJOA31cv1O}@qvs3`y&h;hN10|rN?FW-q$JEi#FR}J^d51L4OcUeqwCGq ziacP*nhd_pO@ z7-Lc*U7Mtl#Z=7KJBzsQ4*y=qSnH723HFTTVKhgDoOlP5$NtX#yvsY?4l$Brwq{XB zNfZ*}P%`!p^*DXuuBFto%uhMav-aI<(nyQjSXH!y_bqa&K6ExJ^1NY$%AmAA0X z;aMw_bS#HfjzL8U$=;Z<*Mlr4q`YPp|D$tw(7?4pxY3j|mzx;MF`39ZQA8QZzZ#cT z5^l;QjCh@}+SG&#CBOi0&f#6$aIB@kVZ)i9NpK5QMa8}u+4KvcfpWP3*EY^?4>vcQ zr~gbbMJvk)aElC>#=C3NQwwoh-7QxG$T0=T0W{y}vuhAA+Knw@06oqKm!TVjkTP*R z88tS-EH*vPg*AryU)TuJ6m>1B+^-aGl!+Az+ePPFi-S%R#?0EaC#=D4rTHbC&9XF{ zj|daShe9tlv?7c>8nyaL&xuUWzOk;LtuJBU+ly7%FqOavrr)?-fhUE z5u3VWSg$a5hhY$IYcR)HRnxJXk~p^`hs`+sWv{h}Jv_HJhQqyE!;p&f??8PTbm+>$ zVw(jw#X|ut9IrBnwo&1QJc?2|RO<6$2j$qJhckuKwb7mX+;XRp5pzlttRd0jcV4iq$2%o)aQ_4{zZJG6V_@I)`doKRA!5>Z(<=+ 
z6YV0>_RXa<=;BU_iw&~+Qlfx#??hc06=)xr>0ujV9}8z;iTCw|I~&4r88aJ)6qFAO zmGnfs3yG!g(#HMmop4tp?cH~giuCVBeb(8j!8EUR#VHD!Y2ofhaJ0)5mpep8WrVl~ z5%f8hya91errl;uBRNVfOQ_It0G3IiC*(aXS9udsTW;!>u^c0~?K%?7LQ)cCiWGVJ zc{6ZYIc*!l(by|<99^{&bp)6ufUck9L0rOK6M7*-#~=<%xyJpNg+!Ujl)|B_0~%r0 zD2_>H5n07m1f?a=96@Q~GHmM{Prx~1qd1lhO7^HJNc)nUiFD;L8ZrbxMS5|zL2xA#;3Lfl& zg)@g;^qn5gH})+F_dvu616x&B->mve^hJCEsRpJ*E;5m9*O;0Q=^Mc=;f4NAjJE@% zI_$9Yc>LB`LJ6rA6f=-{D41!m;VytFoTuY!0&w=>oXx4}E#Q_FM}^ zts#z18PhR$sAwu3`!fc=T`3UmZB)l+H3j9=LM1&B??WQ1uh}hWl{GCP zhj3q`I`V2NqFSh;Df0bD=4`O^6}r3o0iX2#hIGu>Eqe8nMlrD-Ko)n$;T+6jc%aEU z46n+7NC8PcC`;m?*C0IjGDuQDk`JLIO9a(}r(tJ%OX*;2c&O<+dh--r6*DNPlJU3?N4=;5)J0*4t;GtbOI zqCAcieo&ZJ>AH=0WpsGFVH|C#h^@*~1f!TxPaulZ@-$TGLY5GoXdH)mUNg!?84;dD z0{v?#*KskGdwH;y`efrc)-*>1l%ri#mKc;+3KcF|#vEqzZnqNC3(M8qtDBbov~gJ6y?Yz*;v2i=+#@5_3o zQ62VCp&)7zEChvo7Ln=v5gnws1wK66Kx>ee35~C0V5j)feGYY5Ny@ur&gcyBj@{1i zTmv{vt`&F6Bt;>gMAKq-(ZfNR3onnn1>cH z=_GelbY#8U2oASHjU!7c(tic@x&OqLmtJYw$K%%%YAIu>A>`;1eHBHuxhcLciahrE zGc(y4{Awetn@m{F8EkSQzJ^2%H(NrV(@j36(;n^)uQis#(SZ4#;wne&Q6*tsM-0cC zVw*_XbMkuQI6BK^_nbsAq253gKl~Ebinx`&$D23bXc$MMQ%vQhtReL`QJwuhHmOm3 zdLC^^R|5^gn~mWZp&4I@aUp@;LJ&Vp_7P!E8h6};w;ILqVRB@Vav&)Q^EP7G!&_I- zL^rk6neOg{b@z5-Ic5;1Aee`J=qc+xb*`_4UhsfaqNZzm#55G2N zLVuyz*{#dj@Y<6x9Sa~?Nky3hb+SspN0i7Z+txMn%|ER%^!>=b+k>n9ev8DMj zYI4_IZhz=Du%(_`7SWyu;p0Ydm{D0n*~x)XK|w!3D2Hw>m!L)W+ToK1a`-7L%Pdlm z@~5aAm)YT#_k17VHs7q$N}jH@fjjm-LP5;mz-WH!O?3Nx}PzzqF{Go}V{{qovkNw6c&WU&vX>I~H;0!7d#s`J!PQQ}i5L z*~x({CD4}$qUlho6mjLc+rxh4FB{F#lV^!)B`frV{7N-%=;+qs&dXilt47<~yk#kY zzD5u$bGizK)8Vfh!ZDzyc*;+qA?W14Fs;hV;Bw9c%NK=E>q zQ9(h!l{3VPGSUg-ZyU(rWnw%*3IPd!hr(<*ymyDUk2iL8I@_o5KK6G_-r=aR*pX4u z66t#+aZYNkLRf(4)N(L<-++#Zca9Y^WuZ|?aeqKAn{B%O1Eq^OFu*@Fo}<~;+#`~O zMEMaZN05@wWMWh7JZ_TBhDtn_`D0_Ag}27OB&u^lu{-+1A9 zr*FISmJQ*jhII_OHOE{{Nt~aNLz5OalXe%-D`T38g%e$?_j4mTtn(aS`OASqOt4=N zM$0AtcQwb1`=#O5O)k%@FoG+~3HU1lSzoeEHQ(6{x56O&+E|W4%#p>)futnNZ-_Yz z7;d}7_SR0Pu_4|~w~ar?spM}BdEw;j6+##Q7qB=aQ6-@GPh$)v=65;Ixg*CR0Ci@+ 
zw*)xyoFfaF14&7kKM*6s2F)+sKigm6GBa1)2H}rJbMyf8mX=nPiXaviOj6iC5laIE zzn<&tZs33TDg4=J4$~k<)q;OnMufj)!mdthJD=~t{C4-A@K+-^Oq?85rI!QJ5#nz| z$Y&W}+?mIQTkN~VX&Km@e>ae$)z!SZQx+5LAEe2kvQp->1Q$2|b2X^qqr^H<5fkcP zM6ow1YD|7dD*Ss@uyhknrTS$t!Tv+o@~!Sp=bp1^efY0Y|9`YPF_f6g-3*Cgy-YSC z2b4X#!{za7_d%-~b27?^a0L=*<7GPo{U$GJq@NQzUan{)$GoT_p;E5=F3O60C6d)` z_uE=1U*B(U!-Dx%Z4bM$f!85%6NbS&2NN*|%5nl;g+Ok+#{=3+sHayo#EFv;l{L61 zA=!N>%Z892fn9Slh7mf5_BDW`Ypi*iWhyI)vtQ=WEfy~erN z6cg$IqWEc2-O<&)JT-|bfg@IibNB*kYTt;Ocn6WkHkTJGHt4j)=pMzZ8P;+1Gz%$@ zIZ&f2{MCu4e+8xrHb74`HuZ*UPu&u(VMK@dm*a|<15rqjYZAh0k~|h@gY8ytXKN6y zWf(^_squH1k~r5Uhh0Yc%kBWj@Ot4o25}5!HIOJH!oehPoGH5FSsu#dLT5O{IF8DZ z1!9?%t|?1u!W~K+zs|X>_U5gP^_{7u*{ztZUDrsC9BJ`3&ICqfgt#6NoOqaru{+q> z!LeD~{jrUg*&JpphuL8w%9e#jCB;3QTn=odwdIBFIFyH4afBfqv+GiPK}+GJC*G0d z(Qu`M7&l8{`msmHa;|SM#~Q|no4KSS#8E`-$D*XaYI|q8vojoR>Z@DdN%5t740ZQa zQQ>e;3!6{F4NTTCyRG=1C?MS%QkSlMG}&3aMuaOk@wC&8OxjVma?ITFO-6tl6Tlr6 z#RF1ldKPYC6i36aeLyNH33O9}*eq$v^8gr&5BDPTUWah3p&ZRJ3n)W5P*M}_X2g|! zcJX?wn;Xbs&}KJXsK*h_kn$a<%rSV-o1UA72XGFD zd!|FUqp3Sudy1=48PySE9T~Et^Y-+qwXG&*;^9=IIQ)#9*z1N(aV(UyL^_S6{aE}e zC=Dj38^bXf7{7}q4=3v>%`dw++|w8=wqSW}FlEP=Wi%z+;w!})Da%SSt1YWY-6J$gs)e9pBch93|dD&=KJ*64-4f&jL)=v^OlAZ5T(lS>u=} zq(nN0q{BhtxW(JP@ZZ;OK6lgTvkvDP)G@8iO3GRe)X6FV=P?1AvSSK}YfIqJz^1CO z*@%vh#calEpr5jc^tWVvm|>V)^X7m26V5k%hsBbjDnlbOB3wYiby0-*C9q9fo2Q?7 z>IJ8szHP$=&9g2%@9ePEXpYTcSw^@V7}FGb8__g~<1H->{1f2$?DC%a3ytaM1yXz= z$F;<|h%ELud3v~U9uIr+G~C5Ta#YD2Tc{k!QUdYJf=-pp zgch#7hYz}Cq?0BWWM$rwG^9S2Gh^LlvkAa7Hp#RN;jlO?y2{Q%Sxl^HvN$=g0g<;x zN1s#Z7;W9;44Hsv7AoY#n;}nr{4}e_r|jb3Ox~a{Ye>fuXs!;`x=|7obdFFu_~II0 z=o-W^rdJ$%Q9`npP$Q2rZbfBh58_ zn)!HjNsr1mg0j{42CrRc%!dB3rKfo*2P?CCO$v1%XE)lJ{$062E2B8Txlv5qf2!+RXNh9 z(~#V3-;B;{7~Xo8V2@E8p06aM^e-zZ@}(qmc8i2@1|9y-IE_{1hLNRsFDjP&o@wXJ zy-n9qyK34~lo8=RB+y{6Gev11QMj)`9HxoIj+HpS5|*??x*ti`1F5{R9M83Egy(-_ zKWyft^!~;?YjT0DxW9Z<&7;CG4~1o|1JkmH0k6+E+l@T@Z9z~wK z$8*-BV*6GnJlar>b&{H+i0qmvB+6q**$+iV<0Mv<@K^&l8gZq}3K_{ij`H+mq_*Dg 
z;Sw0^0(`t79JAe;ej@3J@r2CCh6`%)0v6$)XbeX=<+!R~IS_>ec@iNsh0|`^_|IYs zXIq|Z9LM~nX0vV~CDKz!;;3qu1F_TH!lm4e*hITkT|{`QL01@6^KfKJ%AJL3bd`dq zF$Ju7>=Mw#@@=5u=>~O7pmJm}a{wiaTM(m{;u*xSwc<+F9eQLeJktP*-4Ynnz!c)5Wbqlv{=ZY(5woob@J zf;4r+t*KtC*V_|c?F+9olA}rE1=AQ;Bn#>7K2csp3QI26bP z$T|wFG*~b3<|u9m3YBXJ!W)fzHvW7<=TbBDFRQF{you>Z>qj`Nf$M$WY`o(p^Uzjf zdm1zpTbgg7CSB>n{`O9NkjHi}1wQL&gA-frrSu724y zQ32jTz+p&z+&3Rjv^Oq@8;R4q0pXp-J7;nsjR+}IBflAyRvO;LG?Y8lwEOklhIMpC zxgX6RNWF6u668IEXvHADmblbIC0*z*VGZ%U#&MW8HT#bWDUses5?y2E$=11ekrn#^ z!uySNSx@mMMIk>xBxjPz_`)`>VY4^vx55XF=ji=vOgB>!=R@SMI>Kh)MLB`>C4ATz zjy@^H)MqNINdF_$KN9-+MdSgxu=!4tN4@Zv#58WcX|}Mu{ZS*Hg+HFqOJpq}bD&;W ziTD^3K_^D}_Rr+Rz>w3z{gEFxzGJzkkcquV5yqYwZS3Qw3doB73DU1uDgs<%+U1#_ zM*0DJqCRPO$ECM9$0OMv7Zvfa7pTb*bFM$coijr`Rga5vG(3kdn!3Zmp5v>G za-a|s>`R0tD_>2Guet0FUp9{8Q`EIJ$%yb35@_Ye^{llGE@4dP2NAw%pmogg%9n{` z1!X7)%5nmJjX>H?=H%xh`qEPPy0IJ`utknFEU(T-DRI6*4jVAtkMUeee5f5Wy-tr0 zxP)&S(XsclkP)&7R#XN57UA)`wT6wY%}rZ3ZrZpxeA|GIF;y?@^GW@6@Gh)gLXuJBLGM-#2YXiWxV4j}Z|8en0@5eCiFR zPFS1zl^+_y(d29T6;l%DN93@2r6GEIcOF;cN}P=9s5~MIZTp89O$E`lK+OEo3x{Va%6pn97z8v>AxW61}LCnHKP^UbC^}f6%Fmk z!~M`xd@}x*hTei0%d~$JT4n`jS$U=BS4^K5y}59%MS+gjSEgaZ+){fI~{&+@*Zb1 zVk!$|35oxK;;h{^weR*|6Ai;3P2bUZT4X^i6s5%ZQ|81CKJmB{zH|7qaU3)Da%7>( zzkr%he<6yW!_oowLHMh2_TJ9{9TEOU0(USNp^4?Fc^xals`%dx<#5rP#Awl#1;zXa znVb)%o&*3{a;k44U*?OSn|ZIT^F|f+aL}bB*%}rN#3kNOtAkD zCUY86tanj&WmJ#7{HwS?>NIuqs{^}fT&Y3@t*N9vm|1};<)qTw&$QVlUIrj%td06_PouU$Te`lySlpx9`#NxvKFTlQqRvfsU$( zuusk#${`){l1DDThBg=WHI`$ZQm6=61V=@M-j8TH*7fAKUJBpeSZne733a9NQaFRs zJ%GCVBmN8?xtixY7ol7H|i+pC2abb&cgHs+#e^l*G9nIqbLdSqn9xaF{_HBWsST4CR1yggBfCj@@Z9 zq}~JyM;O7;xulo^lvSjEWX?i5l)(zsp9$ADfMe*Xxm6&_h;S4M97(Z_CoL)D*Gj|D zhH)65rTEHADV%i0JBB{Sjg0J=Bh@VYWFb**ObVNojw8TRIBeQ}$Z!)Qd8&H(4iTM@DU^h|DN$@v z`NEkYE`G*6)%|d+aU6DWj;%7xfh;A^%?K*FKIi)!y>D(1M}@C($4Eu`x1fHSSI!S` zAxgNVX*;?%^j%6j{#9&gHmJED3g?mmt}>73sg5(%6Yw7I9U9Fw@xwv=q#fV6vZor^Gg+{EaMK~VqY)Vr( zx*fUcGo*EPT&fUmZ#c&YhR>j=xm^@GLfnA}_Qx?9aeHER$fwPUcQoGF_`?Z}Wkg5N 
zk>8ALgH#IEF$FAKyGw{aU}X{eSK642oN9E(T9IjpY@rcFQBNaPMVp&iuRm;t(+%br z0G4U96cS@S85~>lQC8fYx@Rr_@5HddK#o~zj;~DRKp`gB#tbvh@Dit~^L)bN41+mr zIEycug-lJfGf6uZS*wn;BWFkld-!HSzXz*jI&88;+&)Y)n6{lQWDQ%5;^rt zRu%ri2kV8iEgg>5Q)8Va9Wl-!BMn!JQa{(!S2tWKwlvS9Cdcx8;|A??ZLisE+K#b2 z$5zSZK$a3{3qc%9tR6R;x}^X7)o?7bKo*Kp;#@$^{@^5weX7%03|oz{cdj6cNPSyY z&qLKL&ahu->W*n}b)bTPlrN$(Kg8np*|-TGCMqAt2p5~Y<3r@wDxVy{h$tz6?v%5T zc5?7xlW=EaI0~p{3kXG|eiy3Czc}DhU5It*t|spo7)vE&3evtCwQb0qZ7pD%#obq< zTp4nuAmw{dIkl|#FmTg!SJ$!>TblQzCJl1FHpko3y7<&Elf%*?HtLwXqeD&t`lK~t zqD_&;vO($LIJhnlZ5za~QdKJT6kod2)TP%4|9u#5`tF$Q@%a6O0lFj`qf2#$sw{7P z5uPBKHBm=eE8h|Er8-Adb}@N(*Jkv<+$YvGafi8@BdfS`ASnrR2{H63;b}zdp^e8w z`Bj@RZhK+7!Pen#CKOMS5UW;B#CZ}&o%S%y77XMVTCA%S%0e*kMCg&QKl8K{hPdn- zzDo?H25@u=HS<%7NPUs&ql;i|+fj&XXYi)R6xP7w?QlDcx@GzDo8+8bR$l1|Ob@?c z+C~RY0B&Gf$h+e&_|uz6Yg#DdtI^POELEuSKl4(d)d`@EA98AIw$;i z0uozMd((}y|Lio{H1{`*W5RC=`n(n@>52FN61j9zo+iS2;2I(JC*V@*2O8C}iK9>% zpL~{8#eNXk+$;!_S3XZ39FI=a2OHEeTIN_0X`%dsSPvm`Q?C6g+@-R9(*@y?Chb^OsM+ZP4XHnh>PI5g z=m6pw>(bdv40c1Dc<1TMM;q^K{P8kloM|k#fM%yU3uUF^F-!$5CmW`kP4ja-*3j#g zN9WGj1XPk1D&)j_9C@sH_6K>rIy~NJj+$rDM`>A1tS6Agd14wj_z%p&@I(VS>R!#5 zlXS#*5*Z`j2A)U{Pd3EfdmAVu##6}PmMGl&7@lg%4&$xrQftR$p{G%hV{*}Hz(ZxX zEIi#1jw7sv#CX4BlFG(2NTovxehzqmbZcNIz+C6<&$YuPY=jNZG_+$yHS4Gfj5t`gk$utSB2DB^2K+#f{V@q~`QhkwIsjpT51jNB5H6eRsRO2_>D zqy`ZyG2k>E{tO@!mk*{>%!VBjbZ0+=}={!+5g7_qBOtc1c0f zZ=y6uR(m)AZ+74{jXii2$pDU#)uM}Hp)4lWTgciUEOb7cmurUdR--sz$Yn0w@h;{gr30-qmP(E@1*C?5sfbSrXy>R)lOZn;EX)wpvFI^xi zUR6!3cag=JV`{><(4#kv2|{?cp&aHyP5aM6qP!>PC_7ZPre_hudky0lS8{A+CkL{W zK<^`neR7M-0sYXy5-jcleZNs0nX#zCScpPme1Hu4+Vlz-|L?@`K?6A2OXVFgDMc z?J(o-Cv07sN>3exPa5wo`11+vuuy|1#q)?l0aCI)HJ0Hu;nS9Yy~}XCJ>xS}W@p2l zj+i9t7_1z}I6o9VYYazgp@OP#os_ciIl_`pM;9I9$c_n+6q`^RTYI zZ6t@$TQeXf9WlN`20vi3zvCusl;j|M*D#(Y7SCiVLt|1ReJ_*BRb>nJXNB(@X&q8> z8LEopMEe10>OIn;6IT9q;RANKiMWp^Ss?&r>{{c;(_R!Bw-!ZLA zQOo*KMucCGa4isKmf*bZG!WG0GAW{|&`Clq7dNs@rc@qg>;~pNX!f-%*+0BmD^qk~=GYZ|aWkQKKGZ zl=eT6z%62$BjJG}lXetIF32?2MhVIODJL8ob+L~!?Q;r$Hg!kBQ&g4th>Qq-A>kM# 
z+C)<2Em_-bQ&>nS1We^Hu^)25kxDlctL`?oRH z;V+gc#O#MN0cFHOg`9Z*A@5+Q#qneGnzr_>!ha3v=tN5zf|kN|Ohj2U5wBoLo25z$(HnHik)x$h)8mrA$ zH0n8%i?*z!jFx{Ogz&Rs1z+!DDX)CzKn&^#UlY;4+ULC%gvCeX^l2+ucD6B+W zoryR$<}EGAjdsD@7tHN?JW&uG_1CcUT(o?8kkxdDa%&Myb;b(72T|It$+V4hAk8Kg zim?~{T9$;p??954NY^HbTU*mGF2^tGSO;dnbymZwF#}3kVjWD@5hEq8^0nI%Rnp-Q z!>;hvCzW!Iu+nfS)4=A-iF*^{czoRBy2kW)Ys!;<(~$i2D9=T&>_OAraH9`(9cC=Y zqF2^YtvO>p%jt&jj4VB)wB2U96Ug96O;GUXU0@9l{KV()2tqE2jGiX zgSl|5sXG!cM@*D2i%9-vl;=`6I@oY?Q+13i)u};xIp!7=q$yIm5wsO%;on=r4x3vV zVIBTt!q}A+R4_S!4#VV>g9d^8xH^LySHQz@#&MX-mBYJHLfXeun?`>N$DL_a@qXLE z(iDctHAZmM(Hvbl$pO<6YAsP52Fw=14ieC&;NLr8HLRN1IqzIeT4J48%@*(b#36aO zZBAMZD(7S*Tc#z{$wcw%#5oCe2I6q)tqkI58)K6hX-NE(ERMyk=%R(Se`^z8-Ktm? zkn(M)T<65VPKt0_BREXks=q>IRsAJYg}oiIH)37JL4^kYuQ^y+Tn67HJdhKaQZhdUbGvB;SO#IF$(Z5?UX zK~8Mih1-_q`qP+P!Ur5qHJ)P{V-g}-s4OVvX=JMRhe!^CfoPCv>47X+}>?Wz$VkBSXkVT|_9`$(;fTxz(=)z``J|4fG&|pX2P-Aq7ZlNfx zrqqh?PrCf-d=qy}fNR>LDT#9dIcfEb_f;*E!^Dz zj*nw8NBdsrC=d4_;i@3;t(n&RAT&+f(_e7$1^3P*w?@*C@I5JfFod}*%EzMdC}XD; znse|6;31~aGLGZ;t7#|`78+3$b&6E=B5ity_|gI%l?`o!IqKSMJDTVCw~&zfG}YOM zpr6#U)S+X-$Klr#`jArZs@T$;p=N2!oA1mF!>q|VdcC?T1QDsvQJtf>JtKmN{;;#q zjIRcFjpG=_O+p{RLS;cQFCml0b9Z5ueg`e%4(ODg4%-drFjI1T@p6D4KZ*%9PZ+CN zK7q*F;hNDt!`nS!!GN9|rUQ{NgDEg7C}@w+(hAffE}}DzqlVRdvBkIowMbJ4}qiuK;q?&?5|_|G;pYz!u~}Z)Pqk2 zbIR~wLprR76g3uKSw@71kZ=G9sUYHz^-u#i8bnPI7DLy=sLv_KVt>%xt*%}b;NeDa z>}x6FDi=kNQp(07h&ce5QoEiHk2HuQ8#Py-vt7qa7av9aVuiQehfD9#rtYX`V@?rD zDCv)(G?)9aI~1O(@K}?*C4M=fJ|T{ZB*vBA{V6iaR*@kn4Gtd5DW)A!L;AwcW z5gg9+q@c{ysfqU#^0*JVbd{IRQ$N*E4&$U0Iab`_o-vAv^t7BaTb@C|@pE$49-eNv z6^>O}%w@Jo6nY{)gG5@Btqxs(@Dag-@=OCcI^+~lc`AuW{aI9}M=akMH)!qVY88&% zU_l@6i8Z@DY`V;^nLOLL7bDJu+MXq&p78lsD^irO=j3ds{t9UthUXgIk?op}jv`Wj z9@Sa#XXhI`dvKYy_Jrq~xTE4Do-$O#m+lLw%LPDt(MqR@x6n-6*1w>W9))?Kp;lM` z%$q2NZ!+z z8p<(duk zj>o9+mS%XR0UaH3A)~w%L2MZ;s|x-q!tE{*P7+>iEXQ=T+#Sy$wlrTuOnj`^a+7SBRSOQg4v#LBjaOWbiY`JV80BRKlk$^vB;k@Pz#&3-VRrcEAHuJ6&L zZr-XNPQklX;ho0Yg1?Muw1m6Z1}DYAa!Ca=)i72a=L$GkHa 
z6;I`*pP?b`4^W%MneW_e&9`=~g)=aG&g!3nST{u*!ra59WIcP6Vw!>{VCJtCNpfg zg1cxseA={Es3=9&w7;FfXmDxUQ<4(oGlbALS@dx;IBggB1mkB7<(O$$e0@j@b+eji zpCgS6EP0d9X3S~h9-+@0%u#D{bg^>4w1oNsQMBW-IfTN6OO6N52H}eabL1=w2$lmS zHQ~NQ9L?N|rAQme;0y)#M##D=li}3*~!LzD5SC z0*fi19ZRRizHT5#fmwWkEQBJ9YRb_!NV@_HY&v|?)Q`t6CJbbifwLP;oNrN1y|#RF zywNp$+hq5~XJMDZeuvUWAz`_qZO|JO8+tptxbl(}YO9VS;rx;T&zYQ$tTs}Imy=eJ zeUAw_9Ll3TQ!;{Zm7v~w`o1N?F*_=hl#C+Cu4q(PN%#Siz-m6k*?Y{|+wt!B9~#q9 z%^6j+5e>=zi1NvRU(($dKQ`sn8SsiN&7V+{Gm>cS@cwxuyut0V>F4lMgPcA2yN${y zBcs16DJua#V*=Re71x7zV%-TpH?YIVA6apamxCuAVSYgjKTS3W+M$mHj9;z>k)tYC zIUpS&enkY$mvlf4m88q91J}Up4!<>dNBydF5J^MA zzoW3axY%SjZEoZr!tagY=;Gk&!;KK#eyqstEMjV+{ed*DGAu16(>?srAda36(exoK zx>Wx}RetA@TP4DuP1!LaO;M#jA|t|Ia=O)(w&1)Ce>L^huZ~bax__fChve+X(iLN| z?84uT;3)nqpbX?dNlmza5O+ukXKu3&)=M#E`==otiy4`+k~=Qy3jQy`4*X#tuPl%%+?bkhSI9UsRGPH9v80Jj*H<*$I%Il+iC_ZaS*VZ` z?@HvcH^aUL4tKl+6w@47ZC5s!<1>|ZM~tY5aTPL-0mCfv%G@Q5vE5!fT-B(Xm(QZ< zxF#)yx>B=G&T-nO7kjZL&YkURY)AiHv&+wv#MzG=j(8jK&THr5e%Ri?B@X9eOL5rW zK#ppovlqJf=e=)|+B0UsWY>4kw38W@=FL@v`qD4B?ndDPsI$*pl6d64E|0=Rs{m%LAb^ zz0~f6>l?uFb&A-c6u~Ga)KNsy1%`EIcvstSfHk;RI~=_lQYo@fr7*=rI))_LHQ0#* z|2ljvgYDhL3w!h2Ky(A6t;1hT7`RM8EDIHK;@yxu_IOyZ&~wd;;YLPr*p;Pqju>64 zH>T==Oe0Ky9S#0J-YN(;F=fYCn4;327*P@6rUV=Sz~ZiH4!DcE_%)6-eMiM&OchO8 zLgF{0IE_6kGWGJi{8Jo{PVpP@4;} zn7Ng;m5{Ye2*=1$^;yF9lM{^VnBCOZLdXblA`u4ykyg0j48R_naMEfBHH+g>Mud|| zP|uSe#^naIr9rrrF&u4)PvXXM8&whF6e7x908SQ}_+E4Y@db(7P?b(JoA}Lz+nTJy zmtE=jIrEE!emg32?8;6Bta+?m!s17`z3H#u&(4-rj;(UZfh?thxdTDTgVdVh{;@k6 z;5gQ+m0xx*b&ZNG&2`jd$Calscuuk(PBmRemCP}f@Eo9w0H+Z^qs68wXn8}d%nrio zMsW;_7F`$%WiheVlf{nGY?@IwU)W$oM@MNQB3fuvQrwN18=YloJqjnr`U~+`_Zh}L zbMkkJBJqmy*h-d>75hxGS-!c+V+8~kC-BkEO-6N8zq)2`QX-v|Nw{J|lS;d_Ae?O^ zhndglv65GHggGZ;Qi?EfMP_uo>0HC?U4{xNk!pkPvTg-X2IfmJyfL5^hUfRi3`s{%b<=sjZ7tHGEQ$Qnj%k; zd}WZ8$seGPsysnU%eXG44m&Qp+fBDU`GZbi;`7FMHM+R2^bOSeZ|cC2Z>g7eU=&^KYn zRH|m8LlLPjQk{Fx<2~GAhY34oo|P&ZB_tbiqA|W;3wUd|a|XwTOxrOUl#I?SAmt&I zlf#gUt?**77YIux?kEvV!qPMXKNKPY?954zZ?|`GDbjq{W$KQxeeC^rX-IrG#jk_n 
zmUk@f1jVH!^E^I=>vZ=R#8}4m1M;#lv;W0}{{5}+C9~B=e<5~=AIv$G~ zZqq|(;l9RkES>yF1V>x<|Qr%ARrTc)Kp}25Rxp{BR z+VDVwIIMt@h;md47gz7CSu65`NWKMfWQS()HT5F;JXW)DVIx*pFpt@T7v*>X^q`4- z#@oY#Ejf2a)Cs+&DJwA+a&VNwhcJaK^JLsl_lM!3#&k4|R?#L=Ge+VID;c2Tu&* zdK{aBh`v{Nw1FJk$Z~Xna=^5NdJIu4Fr)(?kPlHzhsPRZb-jBQkn-cGOmj5edUQr- zxPhkV*7(-M;|<|30>-XT8Wj`n3B=K3q^HfNmvFxIiN+v+AW-G zJ=qYBNqtozeij&F-Q>S#zeibtpF%Lb?rF@=_aDn>c&c$7*8GTopd-J*If`sTJdMme zBC|hBBl6P?x7rc85K*R{L4aI`7$&(#X@Gxukl~p|aCn;-DGmCgGGaW73=YhB6&U>= z#;ajG;~Ac9q;>e430+;y+?(B8At&NRuD6gwvwG z6xtn*QDMHjfX(sGH-KZI+2ZT#TgcQzdjVa)}DB5`%VsQ$<+mcnQ;SWNkWlA7`Fd zg_l|)9OrpfN<>*zDR>!EK(i;c$2M-P#h$(1EVe`U=du6w<%V`l&5}sDfma1ZeFdo; zUFbK(T%E_odMGS730`Tub@=a-+C~vQGK+AO6YW)`aTwrEeKeg}T+%YvZ4Y>hRwumL zfR1sqw3B>HNvzk9#da7E=wgR!c&z~(^YpQEy-<+w>nN-Zd(-lWh1Z*`!^hT6i+cDM zKI^FX1}gH?$I)?XXLvd{z{Bm{o8HC+_BR?~g~OtG-C%5h9Me-a-b5&8h_a1(;myWy zSnrh?Vw8~VTPVw7=XEd*{$F^jDLcA9MN<_r0qMStx@oN=EO3-~yUDI@twXV;`3`Eb zlf!k?>BFb{JMgWF@-@N=u-35hNRK&?SXi9bmV2S_|RbhH~_$3o*Jl z>E}X9r1y|S-)7p(GPUO9v=!#P26D8nnyoOVB+mQDu~AO0ymzm@z0z13g9lfF+zHCIt zl&8>%%w=VT{|fQ+F=5XsM|eHV5WZ?aM_VfGRy74_e~sFl=yu!D6%EgF__|3u`Y%L` zwW|=2@;9i=NmKGmpfT;jmc;N)6L&1~)%Y}$ju_t}gCzqSh_yPJhu=1Wqhw0flPO61 zJJdc9+Nom0O~*%RzH8!p*CwKj2;U>&S|re_mB)4-ceV9-XJz=lan|84Cfb}?K&MZ~ z<;42|c^rE&I}Gva;BIQ^KQx+S+Ehf3AHJ$4*pCQf(~KwMTGRP{*dH6p;dZK=jWZP? 
zenJGt0o1D=oCJJ@616*r!cUFl@HOXHu`*j|L{ZeAkxIKF)eD{dTxbopho2kI(dpE5 zm!>4nFUVp0foD?7E%U9Z4lXAArBNJC2gcP$8Wj=XR|K#+OySNdJ$gr5R>!Y?Z45^n zWppvdbcFd0F$V&ZH}14{&cb~7?P?G?sxUbq9U*>41ao40Uz)i3PPexI-Y^b_nnf4K zLRn0#Kaj=Ine7!l2Kt2Q{;okjhWAI~IjpFWMVc`!;r>J%S2l`W@T%f)QnLnOGW=&F zI$Y+&nJ*jNz5blnWU#u0jT9RC&PynJku`?)%3cVlE9BcC~MsyhG;CxsooX$nhu)l#FF1f6syyn2Dpr8j3O1r!A zB2iiwpfffcXy9}4hZFjtijMe|-<7nLhJ%;}nwa>&xRDlJ0H>H&GpeKCC^bD(koMK7 z&CVz`forTrwBmP?f<&)LQCjqIkscQ@;Oet$nY3deacp&88WO)Y#rZ{Sd6Pfn%Z%fW z&g&S<;TSd@eG3bXsfvDZrYGYmKE#!7q*h=M4l(-KNalq4FruRrjQnOqSt&S_DPV1g zmvl}I>A4KoHKxNP8@uTZdiIIR5?v-}~XBK@PNUm9dN4TqUN9ga4{-VL%@ zNR(qpIS9Eh>raeQC1dLIkAwGM7bd;95vwjT*95FOFdpts9n+FMn>{jkTJeE z<62_fm@Ix5{CBRT@*kY6x^^btQHD2`Od1nRgpvJvX-GxCo)_{&R zE0Yq^LUqDQ!_AlmcBJU3aIE#_#&Yz|m5x*j61@dQIXy4DHN&YUuW;}6!z~Tr@WK@W z%1;qAJpmg8vWBO2z<-*^b2!dmjv8Lmh_a9<$CJVdjMYmWkk+{aoVZ?NEXP1@63Ul_ z%7S99C6iMLmR`Gs$!45z>c?=xYLJD3m_<-CSP2SwB9WXErt*mU1n}DLd^l+}ypiZa zj{Js#;+;(1mB8aZ-*78KI6P>T8b|?2o4|z8sRx0Y zpRkK-72$MaIp)eWt2m2MM~wAkaFm+vw7Uy9ynxK4#qqGgIF7kZj<0OxKp`gBM#AJ) z!{skD>~$GGe>lTXjvB~oUlcnw)f`H{7U@iq_`wD{xQFxvcnxPUVaHI_n#M*8Ox!jZ z(Xovvs|c6_RY`%*BA8uLUiI046P*|F4Z3i)5gmEV(M8Mw(-P_&qSzbF_i?u3TvK0t zZ$tq}o<~U@7A=OaSXXe&KP`eZ`@Q)+VYAU3GqOTVxhaCR+Dgb4CgeaQr0_!zJDqU8 zVI3V}5mneC2qi%-AVgy^wi-9_IdO=ny&bi2t6?0y2lpu}vM3gcQsQhQN7h+B?uljW zaG^mQI}pZCj6y=<7g3xgm0hL_t=(wz7n{1HkH}G#gB*~K5O*SiegRI#<3ShhoNdOX z_UUkEV>y~qNrUScrC>=?*t-zRri6ZJw$op1!1dE#I~%YVdsibmno^D)Ye7{@sJjux zl}n5_Gq`?uS1;V%5Xa;96XuwuVJb$K>OFD>(pI`%(Z=SbEKO7Q)G%H13SEUCL^kas-j|FLN*84jZCH)2RmtzU^xPZK2QVuChwTiwh=j2 zzEKq+7Ku0r$xpR3zo53m2#$`kriLdSF#;KEM0R?b&i*hkgu?>1sLF|jC?v*^j2rQD z)2QVC+0IWqY?1f7qOWGlnhs0GJ`ZswbZe3n*Cnp>>|}c6B8t7%)E+#99(Ebl^WDb# zJE=&2H}yHSwW3HT(&LjLdko~5+M2{@*_H*xyp&8fDZ9kD9oo&g9lOK-8O|{fS#04f zl(a;;7fCe7%lmjbe2W*)7#Yi9j_3G70g}mAf&m4d2&L>yVZSwWJ(d zxbiP3C)oW6V|QQPe6?e#4_o~HtDzOqW7{smgNEcpdjM(N?uoM{*uWJYXb6X~Rhg?t z3CTW)vL!#;{3T0h&kr_nN9C^Zvq?qz51~FkfBGvNSL))78?GB3gohf%QPpZFB`vWY 
zMpnt!%GZc^N-8|uK#uOIW|+uAqCA2WHl5;N9|lV9e}1H49M!pqtr}JYqnJ>SB8qdq zWFv9Upf**&pEniwJjJ68=@{&5&OeMPiu)LHX&zgP#>=QNUFstqyBex#jP?;DilRP_ zRE{lqZ7uE?d%VFM!&ijBXA&lr*PK2v0Pa!-gq|h*t`i zMMZuR$p?`fPqq#RZ5?ZRvH=~7Fe5uJ;9^Z80z8EPR(|{uGLBQ7cyRQr&A|cI^@Lv~kT<+PXzPedbj4svZP?c+$@d!fPMEqRSb*yEMon0ul5kHUO z?0#TsYPDUxAZ8)q`9^Tez;bLAOb%oz<>Cbd(HS$>!ktzxH1X9tV<;fW7g3V^N~xvT znb+wx;l&2oK<>)7AC)v>b=5-A=;7SyHR8U6T-Ms`c3R)T`PmLPLBG_Hjxi`l7cU1) zOQ@Hvn8mf><;MG8Wif7wc?G#gBa1YnoVdhgI6&vEN<+|<(4TnjnLoS#T@a~#&*=v6kVk~CMD8qNMfgF6Uh0&To>CF zUb`BOMUIUDJFIwirxB&Zc^x@gRO@f=a5E0pK8NYY@OopdTfRKgfPGw!m7L|{a^k&# zyn~=-z6z8fHlM%IP>!*%W-7PTla3f~B7>HnT~7$hHoVyg4zrRmRc2)giNA&7Dlpq5 zo-Tpk-41UxhG%8L1Vk!at)?g7+X!TzoBA?%{Aa@34dm#w5HWuL*soGR%I}~uM2mYau1ldq_G6q;%~I zI?&xS*zE9L!#FBc&6P1pM~wH8!3KFL8$M0O^fdAN4dNKI6|iu6B3eGT6*>f3{OM)1QX@37N28$d+mApML!CY}}i2-P_TmZy1f z*?>Ou@KFOf28coZm2IB=YM%gNlmaX5q3Bp zVj6MA2$+6~Hjj&izid=TId2%bHfW0d6|z}C(9>yiIBt;r~G%pz;j!=g(Gm4%Xs)c-{Fl1ZX~+3;tR zcier0Ng@^L|AqRjWyzH6&7cx?;|4f5?8rEd-n+ptt*D9jH}VbxFZ)kZFE`bjT-XrL z)A_qWH{*9>3Smt~&#?AgA+0<32k+p>BFE^1dFG6&1Gma!Hn0lHZo(0<-;nc0Rq_8t zK3i)9T426nqP3O5ho{eVW=7%PMs>8-IejWObuq#ILl}!s8o^k4Ga_?l#sm1b!7GKyoEQCjnD3evtJ zwF_S@PLm6JoABB?X1sYMm+FirOR|1tfVjN~*grdd(qD(QSvTo4n)ouGj-hOQdU%RO|11p*zH0H=te9 zX#Y!pZ+aqLi$o4v@`$}Nin}R|!ajy`*fKe}7Me%50ld;lN~C>_gwTpPI8k$i#whG( zB!`2f5~5Mj5oUj4u2qyb)Ki%*2dstBP|}i)7zdI;BR=IiZ#K7ksyBnp#tt%+V~(pL zp=PY|O>Y!M#XXqZT0umZ4vNMhhWuX&qO?Rhlq7ahd5JD+bXzlCc_++pm?0hG%PgY| z=8I*TLLW{vhkv`eogD}S;ovWQ9EBqc>S!;rfS~!Jq$b>v#HoGK-l;9@UW|>N7rJfy z*%7Y27GAV)qu(QauBeH36nUizhZY|0Zj3asD9T`aR^bXMk&Y&boou!bHTyIbxU0`~ z4dv)$b9z0;d{KxAb_`*hNgv^{;XqY9=Cp4%L-@Hn-LZyr%%mq35m!*1zvwJ6>ezy_%O~w!$ZptZ+-_-OY=t5WM7Vdy=cR^ z=Wjgif-PIajZN3lmzQ)+LE1N=He12u*x7pSrgOG$JU`sj)IBZj@}R1uBgW0hU~80I z9hYDe&<;-9nF=>IisRWgI897RoEABJPSf)(m~J>=mX4X@jNy1rNU7&vR*?4b)TSY9 zi^~J^^j|sC;1o4IATQJDoV1Ac|2_;C?*OQjO0U#&h`G3I!31%R*2& zJd;S4g=B2P;epe)HW|t>CQd2EDXU2TEb8w=eKdeDrr>1`3zM({&Njp 
z%tgM+S|V*FiPl;gduZezhsxo+wXhn-E+s9o&L@lYGijxEL-lh{MXY#42WX6{WvIp_-+Ppyp)C#R!E6-cam5qliyWSgl%IudVvOqtxXW#gX(O& z(p!jOtR0hgJfqq>C<0g~sLg>d-S;>mp8v&YuSIOrtb^hw4RkhsH^v&wGEqZiD}S|2 zS&wdtH^3GrS?--VIFLC_5xT~8^l=Rnx~3$~G&!s{>6fQ#Q`3z+bRf(a$`~`lUQzMrarEFfhp4@(`0tbx2hW(y^-aw2kMt zu@@Omx*xQ+Zrq~J;ZlRFA78dBD@>c@1Pz3iTAkV6R5vykVPv#DYjp}Kkrqf|k)D8K z8$AIUaabMgxh}U?GL)l8r}TP3%eBPXNtO(Q7#A|%f!6~=*-_YKBu6!EXy!~woZaN4 z!i*Dsurwwt8DMP%AtsmVWmM(j&)y(fGxs#j*7*01M0zwSu@vt`MGmB6gjAYB!im(H zOKM}84Z;b-y$y5jcuY-6$l7-mb=}8(cptQ-lS!)0Gqw9VrvvV5d`CrXFmNks;@ywD zYlCMtD@IjV>Y&juPp^!dU)|rps|-v{Vui4Z24UU919%T~M;BA<2z-E>Zv)4t;ep0> zINk~gJ@4YOq$ucv2xSF;XFy@{Xu#&_>kAJyo}*Uh)S~5!EG5uG2;yKYy^ri{*W%)b z8q2YcydofA<(nRkGflg;JdDgkz+CP%);K*p+_;Wsnni@n7szA!C`D!U5yWzyp3kv| zrJ4E@9%(Q~Y*k7c-(Fcpgh!E}5jtFGX|G?#5kA@wj(1iB4vJD-ayC|&#}Km*GZJ^c zL}uH=0XBhntZ^J}p$0oM3yJbLQdlyQ3vs41KW~9sk2jLT_HXb(mc>MS0%^2qu{AXE z8TTN4qEXi2U&h#ir9GvyfRvv^WlmZ0Nroq{Ns_DMOJ`+5K$1_PBs<;I!CW%H@zQZo z;8RWC@v0m8r$S1kr;(I8D0t;Cg%qA{gtc{0F}YNqK~;88m_+O>Ea1c=({zlmY7@Ch zL&DFZFddBlG_;pb4eWac1Di@tDdiWZ)0`i>{wQ0NOOk)B5q z1Dp}wSej>1Ur5~Z4d*EN)KD#M78CFVIcv6~YW@&jxE5AUm3%q*Ove1RST7=LZ)7f@ z@0toPHhssSuJ+ocAkmjlbT5d~lloGVJOST~tFqPlh4d1983h@CYh89Ly1(ObL<(wr zxp7vBp0V^Rboq5v(O*G2C!-WGj_85#N@F?lREsl5da1sOsvP;&4_=!bE@B>^mc*+K zvt|6FtIMdA)xR`t-NI{l3pC*K{wv(MMccQm3$Hb{qg3bgv42YYw-jQ6y^gSB0b?cR zesS{Xz#`y5nznT*f}v($1ijwqTgT_z1ckRN_Fc{Lx}P`je%O%e{Y;M*=ffL~>S&m1 zuayGQeG_$Ms1?yGy9WV7?>C#gV@za&ZDq=?+HGlx^cIrX1SZ#D`~w%(&hS=4Ihw!* z&tf4Z(%W*LviF{|M%Zt?AKtz;(98^XtyGrkQUbk$pwzp~@0jjk9mzY5u(sYUCYS2F zsLI}rr=BcM^uxPN(=q%ipLv4BQhW~;_kkihZ0;P2Hg!^tz4w~3!{ADZ^*CxmqP&lk zG>f>a)6;U^_Zz^`^ps{1Q+nxsfV%9_`Z$+>)g*-uv=NQZV_QCG3`gCkhH}s>Cg6t% zteL@=Vf)h$8^}>i3%ggi;<5)RVLp;Eu>4}2{p_FzpUX!L<1uFDVs@$;(*GFs*<{zY z#Nq@n97`i?-tuuntbI7UljM;K?LrQPAP zhIEY4Qc`8NA|t}*NMJ9RMkUzoeUO)Ra3uLk8O)tXm#10LdZ-d{A zP7Dr4zG%Ev{1#SxY8uL4v#{V_BAg>CGz%9YI%UhYO=pBJ8`06{mD-amAmy)6nPqbd zhnRDdqtgbHr^79?ZIQL~tA<#QpNuJ+SwWe}7iBpCzeXVYi5b-W=u-Q-Q5+djG9gSr 
zy5FEK4b40u#p7qWS_u1rO=81lE^qm!fgIt6Sx2mVv0PN}ZxPO-j>Amg^X^Zzv~1wp z#&dKjmQ+ByqL3KhA%kULX0A2dHHX@O=(O(|!Sm=+`&bf?^7p9BSt2cQg^7olnswp( zChcf?Ys+MlhSYyRby`)GIZ5vC^g|;!s;H&b8#4lnT_{L)B1?>&tzaq3Fq{ED2T@GLp!w|vpj!T_l_ zWdF5M?==237Fy0VM;W7A`VDW1Hk_UOJqo`yyknHzaP)WF8|-&fW`(oO3Kp^5%kNG5 zr14o>zTW{PblZQRI4!m~I?}>EZi@3qlXrxy)Q3n}MT9?*z^*X2iZDduOdkh_qwr@V zIpT1#hKfeMs0s@D7ee;|6fZR$u*zMFuuN=uq(%;zoh!at4VaMq)pN*b|x*=1Q$*nbhrZYl)|WB-Wx z>8bE)FRp~2>hG9p|KV@Z{R82s5me8=#kU)?Bk7dJi(*EEEq z@~4!_Mp;Gr*P=cve=6wl5FTtRhJBIx-MO%jksJfjiiUWVZ%kF>eM#ncDK|8&bXXH? z1)Mz?Ya zS2)aAuJ@GEr}tD>6YcO!tL<=ukiBq((HurzZ4gjwxdSDUj?AP~M$ragMNb<8&G~R` zBRQU7O0Qfk*AnX}vN*6U7RAiMh@4nbWXN@l=6H$?T^?HWLQTA*$=e6KbU*z;>ie#1 zC`a4Wkg+Tz$}yxQFKcIV5}vDYtm)&!xPeWnQ;Nx@dOfPrF5r|n&o`Rphj4vUK6$*d zlIn$2M5KQM>hD`T8+0CvJSSp4+|Uq?^40KgOi7#@k;4`z`?p#ngvKMPDY|HGY%oWQ zV-m`fU6lpJya|~cbZR@b)~0ReoXhn?TQ;3@%I0uWBRUFo6o`+Io3T~31iKkw+<6yG zDCduFZu*Xvt=8{F3CXr7%c;c9aEQ=@<4o0I6Vz0tAkpI~%DFd=qr1D&@}FS3_%N=d z+UPqcm*_f*a>!i_?aF&bXdlxP4YPebrqFQ?5sA>PoWxt<@DNL-Pz<#h#Vri&aPgOx zTBY>TJ(;@6^jw%kLo=KS!*EMecD$2PEjI;e--_Ct-lYrw)+Xt>+3NIe_S@fvg6wbe z{x6ue47W95hpChks@&%bsOWxgNBMmrpC-Dvjp1lG8m7riNt`>7!|8#% zOTu=vf9p2&(MI(+lj(cjOfeCa z(o6R&>K=;Q+l8SQ*J~?gA@J}Dtnpt&)aThoag6)3igJ=K>XHI)CYZJf7mD@zJ)9X9 zcaGd*JV#AsVzFu}LYzayb%4Mt(_Uv7y-mTg+l|R)&P*aM0*-hLJI^)b+2dbS785pK zte~vh*vi|W8?hcf0W^+T>SAG)R>7TTcX9ssyQpjFZt7oF0lJyZ-bd)u;CC`x9-Ff!rhGJn0qaBVtFX8Epj~RihuWwc%crkn93wX*Fs5zi=5lg+-Hgj9Qc~7HPU6q#)&; zROYb0aOI%KTNor?&Mt$j$8W|Mn$5nPlAMUUNo3ER_Db9_F^lo*l7SrUTf;7kQjz{; z)ZZ8S*`}FGAHqG2;fSlssrAtEMV1oiUIdlu$Byn!E|9#pQTD8UL>Uq8Lqff$qw5d* z6z87qzJ_y`X4$c&qRi`&cJVHB#lIig94n5fT!A2*o)>AI;8nu}YVx z9)*XRw&UrXMn)Q_u;1PxbI~<3Q$7QkDxRkimh2$gtzmNChn+% z4Pjj~9jHkEQPk&inV#$~;4CBlKRnvx9nU2u)N?6mNc=Gr=g2r419aVE`;`f75E~wA z5J%UW1%$~LB{kt5N8G-^CBqsP##~%>^LS%9%$NqV+?2$50y%U)D?VUyfX(ioXn=M2 zeoX6I3oD?2be}|B7Na=bN(VxCvI#pz+$E0}+Ciq5?o+5sv&$x3Fk^)Cvf&od9Mn^d z;V`>QL$AWFDvF{$jZ_9LGp-i?iSTs8U>BN zHqph+b??y=@=Swm84t^H%2K{qQCqk2EZ)ksAzks(Fyxg7*#tx+&-Kv$KHF~Ntns&D 
zQo0Yj!mETmR29}eJcsv?qlM!9J$=mZ!N&bj!v<*JW}ds2n;0%d1$!1N3+pDH$D5!L zm*a3Z4ElYvUgzsHY@CSpe4{%`R~8XHU(^(ZeF3qlt}MjCM0lZ*)>cCe*D`s0p-Q6IE^3W#Cu8R(ZO%aJ#<|BD7@5oj%KSeRZvqB>SaXH zMo10Z!T|F*IQ8%4Msf7v4UJ>c5#tqP(9%M5Wq&bDj4*=1K=YM`aCC-9sz+9lkov2r z&W6k;qBxT6ETqfuYQs2+nrR4TR~1E3UqdRJd)yG>szYZfyw*VYFs>;pg%Y= zdXsyz2*y@4Sa=RsXn4Ib9GS@?%1geeDGK`rV(H^Awt8Yi)8=Jc`}c)68rTteQOF2g zT-H&Je&3909U*v{$9p1dTHq9vpguc_X zJwuuD8qTteAnziCz90>9b)p~-93F*t8^hsrDiZ5S6_?Ty=sg6nJ&L=`pV3{|jxC-? z;l0Lj+zd01)yQ%T$K~?GbF0C zV`x4YT0@7niGI)USwnhUk5MDFb4_|;el9b)+>EnJ^sKbisA!b;d1HDSjbg^IK})K=hNVcv2?aWC#hOMxM4Zvu`N99#TlBRMLq z3F#5q)pA+!zd}AcDr*d7ybr=D{i*HTQ}(OIcD%f%Rv2nCYJ>@-0y;Mc>=X+*@xDbK->R*fPwVcn z#+X}K$prYeaaVb(Ca~(l`;x5q-ywf5@aH?b!*`A27@pKT%t$Z6?@^GAN{(^n?oXQs zGv~wA7^cGa4dEyxSwc@RUsUvj`~i{s0Xf}=sXaN{(zaEYMe2qh8qQO>Q*z;!X$kcs zqG)2}Gvydpi$Q0}zGQ70aA0aeo4v`e$mqu|6>tbSMrKTgu zuLzNY6L!?DpnYe4ZTgO8w_-a*0SW(x!u#TlTW5_I1^M~#Ta$N`ElaH&*j3UJ>31aE z5Ts(=HTp#k%e5a4Uf{wkCh5>lPs)uHes4E$0aA@=jtWuTL2csI`u6Z$~AmzVMnf51p zPVANteeHiWZHN7tQYsH+73u$t`gAN}jD&3M2!A)}*7(~l4Xl`0ivOTu$up8S;`yfu zJE~AaMNK+l{EG}WqbQUL4Vs%upTfTl<0$Gmxt>kFFfF0}Llm7b$g9razzhI(o&2w1 z98Q?hV=5%1emV4BdRW=bfPK(76?A!g?_PP7)5hA8f)1jPAXgwn4UUQ#&o&Kv8NhM> z%Vtz4A?Yhpnp1V7OECx3JyB*dowlrRXn8m@xVNDkUV*F=OY4fFf?tX7(%1}0Y`t)0 z!#V0kLphU*^shqweOb!lhQnHLha8Q z!!iNsUW2;(K{wkX-QAPjp$zY9n!clk2_G4(XY`hbbtXJ z;ksoBLCas5rmzPR%jcI1;{XdK4l>xu_?IyqW=_a`L-J>cNc~`{vw*4YPcy{-5Yu+N zn_ACA0qGt}-2oP z=L8*LFo#=%sRgRb2y!GLG@8ps2Y27PwlN%)rbrw~u?%qrKi4BCwKE)9VL!N0xV|wQWx3SOWC1DPfXedq$Z6j?ZEkJ@I=KGa zpfhz2c6hs?K^!B&hAp{kdSc#)%>7FRKs%&qzqN2<<2gL3jisOv6YM60u@OkELs$qm zHHO0g$c;*=feFzMcrz-Sa%?=RVB`&!ze@5yoFI5CE66~RnzoDJefrH)+x{u|Jqr~q2sqS zprc4PL~54BM7tGfY(OWcTJxjng@LAd!>x_uDAq`-EL9|=ejBQ*v$3{2o&*_gYXC<^ zR_q9(m{7MPiggn&6GITjsodTe7-g)X48-J8y#rNQiu3xXg~9v)E0peN(hhe?POFEM zFQSkj>j~M9x1M)5gr|9gvqih%6yrD^lO?Yfuq-ClsbtaE&-XFW>x{w%!#KMBoU)P~ z$_Q{80UEa=!nlW*u`r3So?T(1AskN(rA29l$QMyakkbiasZBwn@lQCz7>;Z-#GA&i zL^zWKmfW;L4G%GX3Y!e#@cX6I%12#EoU<~Ab`%O;d$c%>qo^>~eztKOnW>m#Wf?&> 
z6T(J}=J>KHm@Ni!*kn>rcAB(AJBPHJf>x~H+*mrLd}|nRbu@+ooBHshVDlkPW1MR@ z(M8HJ?R6oH0C5hE$DLT-$kfel<;}9$h=X&)y@m7aRvi8AvN1grBz!)F+3#YMQlI7L zGJ~+qFplwjNhCe|Mp2Qslg#l~ecL+iIv*}D?xyiix-KDj{Y&WzeIe1w%fopLtZ?LZ z#TOaUVUDB8P*S1tg>;0t6A}C2&QZ#v&kBAdeF_&F#!--Sa$)ktT-Flm&O{vsRE+GH zN2$f&fGhOo;g`{-S9dYwS>qpOlu4V~h`XY&_;)2=y=QVpO(IyNkNx|?-3;q6q8j~L zSxTV06U5gxL>JOu*V3O8L)$2h7APfEwkk3r+@p!GE_D7c2IqK48v7;MDlf5u+%8o|GQcB;hpd&(`1dWNa1=jC|fdL%Fe+ApCAH7FI>a!;2 z_6!XU`aO7fE;WdwCg;@oA*r3rQp!jmh&}yq$BY(g&~#4mJ)q-ePFpsIMWb#TPc3AeT{{T6k)6DelKYS0@nM&t9YwpL0icM~cW3o9 zrouAbVOUy=dc$ChB2vGM>MR~u*@?~%E6&=SroklhJq_U~gG{ZrRhJRuUO5YC?Zv2z zmh9f9?|48Bi#8}C_4`o0?)0^-9KwB#^S?TMO;gzW5zC2_d>;Fm@U5KIo1Vt;COh)E zMaFg*T3JWWF<&ef75o7izOge}It@EH4rWHJQ${;Zhk1AUAaLG2Jka1C0|7cyNk=!Z zvaoL8LHP!97tc*_x$RnMXWa+e4LEY2bp+2B%S8o$2;pq)aX@1$MG!vJU=BAT(+N}v zNcdqCR!!z&!)5A^K(dE|GD@F-vKZ4q{bn;H*y*^juKXNVVNUGbeNJ#xrRF9!G zIF86P@nKwJTQd^u3?d31i{zC}Brq0T9AX9U2)kU*Zg*-}E4xhRf zVj3!j)D^_hXGKrMr;%7TNk*7(;%swxy3zL3B*{_&J;SnwI31ne)Yjf|Sc?RA@H5u} zt3>S7r38BxVe&eUc20(c7JM!{IO+Io!#Iq^hRD{WBgS*c;FHS%RvYF9BgJYx*C>uB zms6`K=8G&P(DQO;Qau@=Ls`I~_s=(sBQp(kEa`~x0x~pW&z5fw3-H1V4dL+Ctzi37 zNQ@Veu^%2-vaq;sP@iWC&V?5n$MM7(475T@q?eGyF;2?$^xWcT7EOG3sc{_nX?SE= zNR*e6!e^FyNK_jHMZMe@j@Oct>h0$X=?L)(A{bAE7|#~|3$HZs*gHUx5d_yaVK4P;NaZ*xZT6V_y=65I-M`efx694 z@HUep5C)6(_5GyX2tKT#{HOHN{S|-^H&E6-L_$PebC=Sm_))6URL=S9PRPb*Q z&OQz%a90ip9#z$aXX~4WbTm&zYC((3C??dmGAd0zquGt0!nY0O=uT5=aq3Fqe21LW zaj=k%!go#I@s3Izhf+)Pd(>pdK{FwT{$a~9S{>p0Chu68t)*u%bTB=tWr8C9fW)Ig z%x7ewIcOu|6w`~?1{CHorhj5w?hg&W89yJ>n`bq#^TkTix{)98M)rfUb#i!dF(Ty0 zMs^etOD?osWihdSLRPj_mPTCX@>651(N2L|N zFn#nGJFx@}(M|s~h@nqm)%9#+Y29SEK0lAX-#|a_#;6pGG@RfXyHuEE=5@qjI-;E$actzXmb0w2aTq$MLT3rP7|RA!}(v3BU#!akz$_U~vXk2=OYQhYILL4glexkAstbyGFsUr+IMEwxoN&~^A%<}n{-txk zm0FsIQjXdjQPkH_@{m*ad&sPj_IG_xo^0#N(UpWu-2nEXS;Dbhogzz z2coDD3b0In!*vbmD6ToNQ28SLRql=kSR%=<-V%dSIpN^bwoBM0@T=nY*DA zCIp`M=FOtPKz9l>`E?G*3kAWMtXy=goE!e;yuowtBmaO z8R4cRs9nXXThY}W9M3!8oV3ES76H^n~QYrdeZ(#&Sd@YmefmJjle{xyAJ3Wo9I(ukCZ@I_vrXl%T 
zQJzDfWE*Pj8&@T(ufMfX9RA7%>$0LI-fhTZyebMlnmo9yxEkrU#&Oszr7B|z(!O2J zOKm{CIE1+sZf^`vqf;K{$NE$LrZobnBRXi+X45EhFy=}j2SOCl(edx zi0esYr76Zg>aR~Rl%o@}w91`bDLh@2Q%Tud`NAH#VS|Y~UQx{$&}p|4J&mID@?*yW zL`CV?gN>$)k>I!|RNCnvrI+sM)J;|fIs;#Pij*81(`)okJ|ADo}x8FXK=pL$$`g_)@KBVr67273o|^>(wUF-(X)5cyPF*F`WzN8DhhD zM8m4LWs2gSPwxKY#t??7Io7}E{oZCo$BchHexW2J&~}123WI*=pi6GG+ntFKoKqJV z#^DOH)T--tm9#{N?Ib_ znIyJ{i(!s&D|v_7yBNnYQcY<^sp^PvS29>{mN#6-J0 zX|h^y!V`C3#U@^zVQ3r2QMNcfQBslYieHIw4>B5D+HoaZ$54*8r_$dFSNR6+KrAMS zt4StEjedn098ySwsVA#BOd8SAdyRTZEmrz$O3K|7IW(bDPzPIeG#fY}$uN#**bott zg+!Sqh0idz;+Tya?7{+t-pnlGI1J^mo|@Y4vYL3a8#G zzzR|mtxp=gzP(BKT!x(IP#cc3!UqO(*!)N?%rY5KhNQ55qF1_T)9Kr`pSyMI#?!*3 zMsU1}QioPZNIg)UWjCsCz|p*$&ba{kBhz3RjDp1lstF6Ki47 zXlLS=V+xjy^wP^&QC9q&{-buD=~2+iRzfl+v{AsuaE77#ICl+=WK2ytu^iwCg<`~04Yvk@M;7TAi9 zHK_^qFv3{1^AaGoCGB`Z9U9?>8_?mws;?%hNQw3c(&*Snp4%g|^;jX+o*v9$S}AT8 z{7B=iGMp*G5+)WA4#~?{a#m0k`cXvh4|FB4xQ%n$7s8{B>S#(1d=F(k1Op*VJx#l(6B zSqv-c^q0ajP2J(Lso91|FTrO~@H!BzpGn-2hqH~b;vBXU=!a(;Y#V+(rnjk!DM$4$ zYXsfSb9g(+F*!{KsP?LVuCY%X&n{V9rXcO-QJd|3UMkM0J!~wfWz5evgrmLB0?Jmt zD5(ke0^(ALG55@c6&zk@6vrDYXC$VV?u&9ZY%_Ok1JB*yXXi%Y#RgbE{z;iYD%fc^ z_(D#+myoAcPqDPEQD++CH_Ugw)L4!&9J+Euvc>j1ea=Fnye#LcT<+s`^X>3*BRO(q z0?VpVk`wP0ergQ!{3tAa=*X)IqHb2x73t%7jNQS&~1&m{w}uF zgqQEl26VWsOCbU=xm4dmRlfY>Q-~80Z#7}Zvup51NJaW@qdvRS6n6o48EaG<>(bQ0 zcEj6^<>*cuqBoW)iu(?7>7tmLi>{A%8ph$HL?RW1lvs-IqT;bsw9wX0K*xhAP`xf~ zlmP#=io&~%wiPM%uqdo3ulsoq@8>|Mb2YEc{BYX?__M=%?FJmiUS&OST}rU`5f-fz ztsBPylRFlN;r&K&*j;5)D1DAHB7A@Z?hQ17joUB<_d%0(v_CboAxcR0LzHDXn4U-5 zhCsXzo35iAAd$*JN-V{XP?7aeTM={X@$ga8#D|sMcAoXF?O;p+>3)p5oIuj(ZURn~ zkDIK+_NzUDEF$SoWNCEPyE?svlfoxW`nd6TToK5wkGKiU(e`2}jSvRyLl&S(PzI50=yize(S4oa#VR79lzCF&mzeT?Q( z2)hQa2-3$vfm-txzHBguL$A;gvAC?u3jY=2ITEqO`kIR4AuV4us$(pilZ%-zVyG*$ zg!&p$S|}ESqc~ahbz?Yu)eU`Hp0oM}wfE;!oK^eJAs5^J!Neuuo5pbDBc+dzZkd+y z@h!4w-Ex0Qneb_&m~R`%(NR}6w=Ag$@f{-Q>%^=%+~f*&9L>Tz627|@MrqEyq9Vrk zayCxwV9yl^jns!v{QbZuKo#?*#@N25$3r!XZWe9J38Ekf)!=-lzv76 
z2bws;{#NU-St0k!!eAChgV9od$DC!uYiiIG`!{4?AMCmx0VXllAg5T!^E_!wcqj9_zH$glXDXRHe+WhH6y>b+i+NmDY@v& zq(u4yNi4j1<1bX0>FMxCqd3NVIcchV)6<--myQsBA|kn9(958MHp1FmFs2~w zzfhZ|=x)sE;&7e6nyRBpXPYDK91>-z4@O`t0 zZso=4Se0>pniCHHHPR~M>|AL~E0)()m30@FL(yqESCF<$_JfDZ<418-&?@GU4HQ=x z>dR~>%Zh&m@>Q~P9M2Hje(z;KN8MSWk%W#2S0q8xn(=%IcGMVgVcqoJMsf7R4RPJ3 zB+iw{;iTqdSQ;+0^#5>WLpaQ@m{4!7q#^OEP`vh-a1xTK|1Xaz?pAv>5;)DsnueTB zb*DRva|;@Ra4)Q@8_Us0R;ulCF=go*1k&4=2OTIIi1G{9G?2sFm(%K@<%=jJ$h8RJ z%g3lr`<`+mNS$D`k6|1wL{2SCzQ|Gn?Mu)B04+PHZeb$qXDml_eIX!PammPSEWccs z5O9A258?aLp3@MKIXPOe)Q4lt-=BZ88MsHN+&sK^JB%$B`&lv6m!h>rS{ z6APCw(qCZ?CT4#=(KwpnGHxtD#Q_I9!XZX-ShfxJOi4?uL&;)U#JsASD8%`6Lq8m5 z97pvn&B>NDq<=W|)fw8MY6`>*M;O5I$nrAb_=c)V!W>D=0mw!%i!j=m52d}fq1NLU zV~nptKwfC=(#WpZL z7^2@VRgc<2hW24fU2HQa_pM2SB|V&=M5OdP}1?O3TU-EfN#$R-{$)(hIjX${yz> zh8f>xx%zat?fVqq)}Bja%B7TZPFoXrH$6e0>hLH`IFkY)m&qmDt3s zASdV@2t5F5X|{CO=>;xzfbqHz?r1#6>&ertrK#7lm}u)s<4Rz)aHp8OqlK&WJl4XU zN=bSU81LO%VB3l18exNJJKSF>sh(CvMmK#L35S7DZo4PYL}8Cfx)(chVWZJD;df(R zOj%}GpHR^i{&eEm7iAN@H_Fc><=#OY46-{o*^8$XsErW zB+mKd@L_3|i2Etv$YM@5Z8MCcrqw2!3K=oBlc9;~xaT5AcA5T7=!lCosuujL^U2ITC zCbK}S(aY6@yEAe0GjmI|miATSI#X<{){1*=4VO;b(caYwJS%tpR$B*`yM8a z4bHoa{2KH}p=076hip!XIUHLR2o>p1P@iQ#dQ{WanAjd#Q6iJZaSS>Nh4^Z;A6!*X z$SES(u@!z24d~i-Om^rR*D)Pl$V4u83FSs#dDF^#khkE|gr{&q4CK0W%Fh_>gz;ss zG&D)+r8`Sqc9%mqJ8+=;V1RA6G0#81Du$lvJDy@Wz4BB3A_@w434tng`SoJc>9E64 zdtNZ20X;{3`p@AUP|q0VP5Zda~d{m zTIfcLXwfK+#yShAEai)mns7UbQ@}-XQCW-?Y;r90hG#A8GOEWpnRG7>e2X(I^nS(}mirpqW4^`I zddtc2U`pcLkDLRLi@XXs%&ntSUpqzL-%yUHoCJhw5EJbIq;a`U(Iv#51@nJ!K|j!V zj$%|u2w7Z~6a{?{q1>*`-bkx>_UIzoX+J-%#+eEawtG15|Ki=&^>s%N;T^Gq#WC8w z=_Ra3MATFYOiRa&J=AW)u>vj0=sBz?De}WeW-q9r{BBK=PU6rM&PF}lkd7IZQuuJUc(37!##uL>Q;K-a0?Npfs7!lh z7P~2E^YAB|ti!BRD&2WbFV&||mBUY%eVUAf`$cWnQ%&5_Oyn6GnC7m6dNFNr!tvpmri~Bd%2O>ESPD{p7M1m6vV**h zC4T5TpKbDvIgu6nPD+XM9C8#PNplH}2^#X}8Ui22<)CI}NkPibqcTg3@`yGnJl}L3 zC8qXdkY1`UplTjqEY0KilpW!PYtbzaFk*V?zKFVf-nRJ#jzy!l^u;Fbc;1#+MbWOJ zlsGRTr(`+9s>TTyFEz-XS!qyz50H4CcNDA9c`+mjP)s(Kj?_?3KB}Lsf8YD 
z1oyKEuQZCI(l)rJOi7$qkwa@EhJe%A$3`LrDTY@Y%+bXb60tKXu1k`lzJ}C8KrQze zIQD#o<}z17zt*^pvGc0kMM+iY*AdM@PwLcm&FW~;*Bj8`iEHQ&ql^e|Ac4IRJB`wT z30Sck-e{Qh_`#TNut={mQe5&A8*t&?lyS)|f^x!tlmF(;hI5Qa&8kGI&OnAEi9M8-IV%xA> zPP})J$Jj%}PAv4g;hl!Kx#P7ICu$gfL%-50Bk zJ|zvQzlZ9y;j$k^s3XSv$S73{9E!+2=H73JJ*yT`MuZQLz&cM~ za(_mFL)Z@{MGj4@3m-I=XKY@G=uNL6D)NU&mVrH~D4Q4$`eDQ2!?>zg3b;p|z$D*d zI*Q>V)Mk%{aQPMwpTS9i-8pQJ@llg^bT&+^Y}8bA`yV5MO|*3xZAQ81fDM=`A2*Q0 zF=rB@*;QFk%ukTHAG5VM)f)_kEtml7I#a{R`S3~OIogz*JZ7%0CDf;g;u9>|H;%S& zM$%e9MPi|fOKA!88G=gHD0Hxl4}R#+8fVX{QPL6Pb2&4GANl;vwDvx45XVz15-T&s zrL+Y40zn7hsg-9$`-|vedXwRc26Olb$^ycbzetKgeu>D#SSC{p1DB@`#NQ2!K+5yDrE=COogYUMCZaG8=gUnA!zWG}A; z!MUH<(Id3;0?`-;8NO~*$9#3u;*e%t-NQF{5B2c~Prmr3K^^w7RcmF|t}wKLlsMla z=V0X3{;@aO(isl%Hn(GcuJCQcISP!eM#zKs>`o01GmsYb76!QmURtBoW@IwRP!?~BPugD z#fCwr3=#7bD_;1qK^*UE#Z7}siptJU2$frf>}w)CBt#VQf+9NFd+Q?_)$14mAOk;&WjV5K?#cy~Qw07}q zrw|^{x%Q;v|mp*&Z7P^Ya|j|K2bct(>VG15ijg5A{eC-OwL+ zLr1T8L&fTFJ~(w#4G6c)GRN?xMB5k)_!^#C&q;qL}c}dQ{D9Bbc832fPV}LX#jTj{J zZ$qpfk7x*zsmkeX{D(w#=rF1f+7H8Nw4*oNG#&nHEJv$miS?N5DoTlSIZP7B4M?ME zS7&ZVxIDgzYp0GMpP7m&#^e&c0!0r*>NIx5paVPq4bs*Tdl|sdS~iS6D{A6hkv#d; z(e6)T4L1G@dmF|vj$$j!gw`Y{70F+T@*IHL28!y7+8yltGyyk$xU!)f&Mp%X%dV=D z;$DSZIU_rPHyC$HxvIe&^#q4VDy3NY0+lCvYp+HCtGt{hScVwACtQzsb)z_DCoOrz z&;wHzQ)aF~)&XE~tQ!MF7CJk+yRbDF4p1_h!~8D6BF;>jqV7v7-65&qPGXHaGPs}NJTEP-ABqn%evUFC>`wv>9@~|2eg`)Y9EJl7 z;%KHVb;RU7Gn1A`2a?2cAN{eDSR#$O*Wya<&RnlE3I`d`F`}u8h+6&1bOk?{@FV&9 zZ1mRaWB*@T2pwz+$*)5=#O}c1?rms!VfZgs75`B3`68o(s5QsG!*G}Z9WOE^RbDGH zA{C?(F3#T*=u$H4NCfd=Y=`E$`LsT}*0$kTPw?Mwf^!bcBjIXGyA^Bq{ z&ju1P7iyqe_z>g4jx~hi!C7)~>?(_ibv?3Z^w|!?+RHd!h3gy8F;vT`V#e&MqA2PO zNM+YK>QBv1h1RZF6u=uA$`Qzw62>Q3h)DfLRA*a-1$NjrlgsSVmIgOAgk#ZcOf5oP zNSK=tlV%rCqZQV8Qv*26zS3AFrI+r_sLO|ANAh6;`Q|38L`Lm^y*75)01fx0%c22K%lP?M}!PXI$ z-p*p20ShM@WNmLJrI+qWITPuq?*3dGNBGWY{^}N{@8}9sdgWocmRKi~MHle=Xy+t? 
zxEK2?^MQ9XxT6I%4T0^dqA2QmQdyVqPYfwQwEu4G3ozfo z!1)v-I_gqRFJQhX!~{E)F!p!pNx&C0#2L~XjN`CMQquARjWQyfMgms~piPW3tzo07 zJ4S@1rZgs(>giNH0;;KgA$yp|<3@rE5sI*O@qMu1REudtS$L;75 z4m^b!l1)Z+yrYKUXjx3Ovoa0cJNydy3>)lDvN#H7uZ5Nb^rjlbMB7XnT@%^W5@#E@ z|2)sj*kU}-$g0mYX-2V8PsnqKly5>ATt}mbW6E%@!5r;W%+R{nlH`k|B+OP~mxTZv@{o&XNw>)}mb6Iw1>4c{`QMwWk{{Fx@?< zJuyn)LaK5g76%cvtuszIAzWk(N6jgTsAQF{i>)(GI6?Wj6UhgHj7O;mK(wYXB3XBF zq{799U5}rPY0a~OnE9eCC*Yk4)SxHInTNX=%Tczne1(;igt;p*r72wmVsZsT zxSLTN{dq%UkaWbjI~nZHQ-d3qsdeTSLfa^gN?A#*3{}2?o@n-FDYtd){qV6h)mT zmBvaDu8#hLtFWhgGjS!tjDa2YcOj#U7MC?u!Dk7l3C4Z-xhfDdwLPOb`tX{cx{#6l zC6s6BPOCxkDQY_m;V9h=D@l@$7;|K>yrHh)$R7;u=S|&F-jFI+PNkM+zofaSEk*`= zsHxP_98!~gKe`qh!Y`tuo5O0;OHF<~eley2GXdSWUExEL6EBd*UMokig!xez8OPy^ z&IyId7f_M>0_ACKRvQ5x$`%%l;;1N<^g>m>DUNyDNn*);6qi!OP9y9xq{Db>a37V$ zl)v4i(c^+@BWrq!BQ1UkOU81H>2h-AEMJ(GP?r(Kb|=MJ;2b)vL+4pDXr2(0a!=zq zTIq&(jhddA_aamF`%EuJ%eF5a&4qg#%HvZ&()hy48j`ax; z{CtHPY$_{EfH!OFP9DTNIS|tMgbtLY)MkVJV56^JnV#nsvw||6FUoQPK7>G8kf}Oi zS?fcMf)6X@F;h~q_+-=W1aVF4!>D{L6zp-da)gq$YS0e(>6`FyV{ToUX+kSJ*A?ZJ z(?{@rSk==lYYVTq^eB>UP|11GKTqN`iorbQtBOMPXv1&Y^USYC z&|N)-cSZX!*^wv;BTR5$9Q9b^J1SFy6>3W2JdPZ78F9VhMVrn!{ep8gY`=Kx#_)I} zIoi|)hkQv(tS69lAo66>674qb26J@tFmaw}G)IeS3M$HWRnimjNhFq5Yz+E5zvRhA za&)*2Pf#k-e+u;vM2@)LD$iW9wa0;W{qR)dIJ#Uc%^IMrac#Wv)TAip)5xrrjChaX z>BigRk`a}}c?LPGfqCOpMC(pWwz1?nJkvmqj@^>SrfsgXKqrKWiS;b9IF`WBnT;N& zr8Sm#wvil@4W)5+Sww{AWP+8Iv@Owdjo@faEOE8iMJaKfM-Gj51>JRb59dz^&o{)0 z_=gp`q1;5*g7u^z;*0BE9bWCDdgdoW~6DERF)^lJoFVQ+C{VPAU0(K^XyFMgRlRhEo%*;e2?xNw>y7 zZl;L^Q;8+`3JTJwwS6PgfmKVrOT#Noef@ZK6A;3#3OVs!MIOrx#yzQH2(LDb4@l zKAqe4r|9^0ih_Ep(H+i(hMu=kRs6S+e?#ON`=wypb#%RhsYOH&VOyI?tde8H+h)Ka zmdxo8=kRvB6-R7iO;k_4_T5S#-Q7EQckFYlq~%!X@J_oAN1tQKtHm~piS;hBSUV=M zm``y~m_82gHjJZPLSBjhs zA93=>O<}ukmMVEPLU_NS9DV5IR2n{3MMU@j37kzxH3pk%w@0(xxjD|;cf$vbvmU=0 z)6F$h`l_6WA0m-`Ub4c(`LJ;u`DrlUC?fTbP@R^P6_*}*s2A!e^ZP{_{^$x@EZ)oq6}ywUJkqdAP< zoLH=UK{+T1^EqM;1g2R2inR(mCzj}s4WBobqw+Q{fGyO-`$Fc~Try_ZwAm<~TVFJu z~U#79|7 
zwC|9{XOqm3@LeM~T#mKrhNL0k?@^ewm0RDk-D}6%5Wa5!#}K@-Z*ECNh#wGfT_ARi zX0ZS3bZdlC03(JK4E{j*t>WkKLxZm34$LJv%jx~(ixstXD?j3`us^{-6vuDv!iK46 zQrO%7*vO9TmWHtv6)}E72FD6LERw{UZXWmgQ&V@?EVZ{)$VmQYlxIhU&5(wdw)lT+ zlN5e#1jqfSq}+A5q9VXA2w*)gV)Zz69DZp8M;;nO{wO2FuZXZAxNN@gYm;}>%GwZI z3R3yv=dPp>x4p_0qyD#x8>}b6hT7SwY1sU!-kE zB`4tT2rP}S7DwEzYG?Spp&Vnl22(5Pi17z9Xmb~_@@T-P7O}hf9}VN^=ZfSpPnai( z?yRIH*q;bv6-ko^*(C0UKO4)@e>Y4+n36buAxEZao*3APl8&u}<1&`N8q8su*xZ8h zWmjcEG5kh`?>=1Z#(_lLM-B^yclu3oDXh{AalxJI-OUuGwXK$)I2>&#Iqp1UyRSy^rQe0ls!T+QUjkfj#MuJFNje!c=p2V3c`hT?Qi z2zwdN;Z%raTpA*lt}PO$69aLsNG@F@h}OgwyTiqWu(zQcEq1L>Fcm$%D^a{UN=i!+ zu51j)<6CKzR9cI06_WV)ilzjv&iO^`VWNEiu4+8TN~%IbPp`NvODcm`%h()iIBNB# z`kmJH!71A|Y}ypAZdix)U0z1gtS9C*$mFz3^4Ild1{|9U60*soy_mav0ce+GuiK?XW= z{M)h7m-1m(=p5QrS@91hzZ@Mh-`O1wF`lC$=XN$ewzM${R3v{Wy%wF8i+z>0$ zu5BnsgIiHh&MM#ZM$#1ZC{hmvwem_?WU>eMI%^@D%JO1T(iHjVoL?@kwbf~D{tN*s z*EOnRGOMyYw=N~vF@&*SNnW&x9pP9bpo<>YI5jxmCgLLC>rt6it*KX=Tz7)be7nA} z&Kl1u3F#@8uC2RF!phqXcn1to8)Cr%yjDBH4GnkvcpOuSlaovIMie~+sdsfJCc-!nl88{HYqo`1ROFkG%r+5wN+I%s=jMf*8qCoqmiD4b>7{!! 
z>awIKHx^Y-5>7DSdi-Eam%QRmC|ORpb;QxEVl_m#AwqKJ+TEQvy5>ZK zIflZON?eu^hs z9HZ=plAUzKxE&d+M=8z`4{6kyMt5=YNol$A?TzNBM-9=E%M``E1Gxu*n`Q}Vdf@!D zbc9Q|qe0i>H)9HR!$f0MPQ>*jvOvT*fDv|Hfh*w@133yrHIg7INc&W3b8wn^7A@$7 z7YtU+1_L;{zJ`vekP_)Mk~o-8PddG%jYe_Q&4!m0Wkfig1h#NRd$WyGb~vXkgfooe z7;~)H;Fz9(XA(&BGOjy?H@S}mP6onwz-=#ZLQii)79TS;XRw4rlvv^WvYGnT`AwxoKZc10mE&L@Lq0z)D;iR_2M zHX}H~D@tRjEFk6WRA&FiNf}0Mw5RyEu#eycChw?2l_{OFj35^hk|uR_a7Oha(_Y)8 z4iii9PE_Q0qL`43ry8))3>TZc<2@G&u{hh5W+5o#oryf0$YsYt1?-K6Ne!F$DQ39IF6CQh*5J?AX0HwsP26Fj@L~u}s{tzKxyZRWE&6~bsP*$3DPU)rFr|x0U&4(MF ziOIIpdehS_to6c{V2Gw37{=lG$TIOsK?f~KYni6dL!v9*U?O?W@TErFb8m3c5-E^G z=K~*JcVPynk&O)D7@?P+osvs+fvRi`%#kT4s218Rn!IDW%hKzi*_Ek@wln7;noF>_ z!Y+e2+Jj{~gg`;UyD41Qa`1J*)v#pJ_%Nb$3qsny(q`sJx$+X%2`@H zAG?y0DEA_TOz;wlIJ2^l@jSeB(c6@FT3blo$c?xBETzmmB4=iVg?@h7_vEN0rNnwbrE7|b^f;1O|7(7H+27&uhI90g6$Jq+-}FY(6!i(DvK&`-)C^BF zoTD6DQladMTXTx>Br=j=fx?I_wx`0A4X`%DBBht^Q>aVBB849?x)~qgG0bzB;i-mk zn9P)kxrjxnTZR7+q*QPk&=YH^j=BKG-)S=$iY8tE5MQKM2BenC67z0g$G zA%|lctg7iJ0p071sC+2WXH&g5nt~e)*1#lILeuZ|V#7FIO;*t($`^G>fnP#!vSo)D zGxtvlFE!HIY}uGxsxQl`$(@N!xL>9>ArUox)+Kc-YSs71Kz!SqOO?eOQ_e9AY&J8-~vmZmV}1&{3vxYT@!lmJ;Y)1kqZs5fVoy;oU}YSPPa^ z7`vj781ErNH6*S%+j_Ar@qVvy9Jy(j|0$$IdLK!&?aVKj7Sz0dEgVa%{Mc2L66XWt z>;(=6BjJOl?I>n74;#`;@Iw^T^!Z$?55ptiV80KqMYl9%mC{T1Bh=+2PYT1|fKZQx z?)liDI823)8o*=lj(QxP>5(*uiT1HfvuB1{Gq2G~2p>1vdOo<-!;?**`U(m;@jgKw zUw1NwCosE;`KxaDq_G^uHzgLSCM3$INGW^Kb`Ha*4d8g^%bJ#GM1O|52jT9M&A6-E zn^>GPv-7ita6GRDv$HCvoP3T%+8;Ip6=$!|tSk&Bu=YQE-k_@tN-Xti^H-)R@)tg-06 zsnr`XYMik-UpIuqm)2mYnUXl)ASYK5Y-dVm={JqA#wrriOZQvUW$)CRpW)aIYj{{h zzHQPDH+)X2QjssDBgA(y!VW0hcJ79=!*>ngdIif)Ii!g6zejz(0-CZ|A_D7#VKP`% z_kBY+`o5exwudz-fqsxd>20Ceg->J%PvZ{_v}f^Kjg`XP7(Y721BRWcXPA*`+FfF0}k+XDrmmT}pfY_Ez6{U4&~3y8&dI12wYl*2(% zs|CY2UcVgLgX#hV46$n~1;XX=qqu0vQ3+OvEMG3GOkII|h5(o)g;?Sc50;CbzqaCU z_p)0!8$Vv5VK$8wtkfoD-NhAo7wWO|Mn~;-JYI2c<2p*NHDP+0c9pb5x)MnmeB+<{ z>!Pdcq;O@!IA&?&9!nclr_Uu9-Bn0Xe}m&%aXb#XqZsIg^K3%6s(~Dy?bX7@sHPlU 
zjXbu_c>0V3UziA2H;AJZV=_H2OE1}LP?oQR;aG|=$m{d3Y3hz=RSD)Q%LsBULJk7N zc7UA59vTa~y4@X97>@5_Bu8&%63UESl?BDzm(1$W!^-9S4lT#p&$z3!UVPLVxr{69 zQB>^x$!6)Zvf1NC76%y5VP9F&Y9XhP7zdJ33=D=dVHgfFfTKW^x-~Q!OfTJo^S$T6 zduP<{&4oiu+EE&EQqBnH&yo@0P!ia2Ca;r5gp4q3$I~&wVaD>5hv@l=r6GQ<3yOL; zspjIPEA|M(IG$F`#hd-GM^cr>jTMtP?w?q~)=zns`q~CsrCm3H70Q`1RhCt*jv_zL zsGtRp$1NFejWa4qK*~o`nPs^A0w=?DP5XZ+2a_TkL&9}HD9-XM9}CLisA6uyvBta@ zzh0qJ%2q}p<~$hGIJ&9p@uukbN(W6K_pmx9m%{b!CLEQw;gpIjB+3m)VPQli#0g_O zd&%UF8=uEg)m3U~-iVrN$+62dk8{Ar_iUG6UBJtvBNkknXLh%gU3szgNQ}OYYVt?{M)nY@TmQ;@pOuD}qCh zNVu(OJ03}GeuDy%yd5RkVx_&v86=}7<@ToTs4NW|nP(wU?m!AnxFYsFZ3s4pq8#pM zEJp-FA)#VaT$U6CT~8<%9HEw>6d_!G5$?}ZjOB>4EY(j_koKw6X01s5Td};jzc?Q@ z7-JQ0eeQB93d&OD8!j(~rl_ZpO81+EmEr@IU53s~*l0}0n8DJEXIG{s+Uca}`Q@65 zfZlM1aXbxmPFbz-&=KKG5?Ir7D}=vclK~t{5OYH5=L`Im{8^Oe<4O+{RLiU+UcP9O)eTJW23R*_m`ruy zf5o)YqyZfkXw9Ta8d9I4I$xcg)Qv5BIS<@z>zh&N8p+YmnTqmeS2azMr*pnic=ND> zjmg4{(HtE^Ly)voq(4jjWBA-^D~>9`%CT+h8TCRW7&CmV>nW@CFPB4fQhA~rY#zYZx*4nGkflov;xGboa%Cr9 zn3hl>qim#uu@a&cFi;v9$`R9J$;Gm(EGE_hS?U)xyMkFn9(cEC7{_CaQz*GB;2cd< zk^WBV(|Ae`w^#>13cHNrcy0|Zr=%s;Zn9YNVq?zsj)Qs}9IrvtlHnXvVTDA@SaDsF z6!kJvSukMs!Ytx}l)SM%!e#GiM2F8KCD*%JCMD9nNYWrHy{pb}7XrOT;oipa46}b)(?pgm zrkGr+56JgUe_9OZoSfxhxDPaKM^P&h>-LLFX$kZof>_k(>BE7$tsz3Y^jCYHJ=ky# zW27!2X#GnSW1%0C^Jvo!y%EaWFg(XJB)2B)%>@+#sHo zA*K|f%wCV;P465EG8*P`EgWci&^Xj!RAZd_T!DaZ2f>`!wpZc~7Hv%`3*i|IxD#&HzfhJG>&iSjInEnaZd;nHm|iHD;(!n5(ky~A_J&_WxI zGbgd*&2!g6O^IS(8HI12z7zF%5Yv#5OCQ4XP1-TeWQvL|zmVt)D9SKx>;V~GXqt{; zCO3`Ln@G2cYbm~nigfv>m(4&vo^k$S6JL*i98(L*Zl0uHl@sbEL|qALF}63n)DVt2 zt#UkYLd35m{W41L%^T$|+u`M=j1S{(UP&s?M&C&H71X6W0;`&Et{ToPex*q}YCuj^ zQO{or@G1gW18g3$6*p#hwQ*MQ>{_DAB&><%?-5>hUW{e}K}oSvPdT1h&NI4Cg3(&;X7cOvaSu-k~fe%!hIg zhH3)j3?1=KA zSmDzq?U*3Ni2<}tD&kp5`k$eG9^G-uuiGjGg@n%E2Sk6-CdzeZ^`9orO2xlQN0gs&UG;WLT@ zF1VO^GEBJ&mL&!I24Q@${eFD0-!zWn#im3RMEXwZ-^%Kj^|a_Ae0weGDN#i|eJAzr zP@R1!jwKD@yC&`ELz$wY%P%DQy)3$`gNOJ1wTLoBMU-Di^am7WjF9z)t;zYR@IzBy zCD_E0tloh_PpBUerDi)W=nX$Mh~tIkrMu-alfIMsPpHmEe#sD%Ge0$H$0KKo%G2Q& 
z68%}e<=FvN5)Br@&rR2H%PCPsI(;YgUr?PD|BTJymnQ6}_<3}nYUml8CHE`JX}S_S zJaOassqW73YtwbaDvVOX${pt~p(f04h@tPrUO$7rx{5COtuiwfb7rt4|hbFzwh42*%87=I#zeXtF6{%i=x zP$#FV5b~D-{DlCu_1fbo{MF<=ZGGGqsa&DrRGy^&mZc|p@N+GMzpq6)rm5U_{7TaQ zpfq15Tbh5GuH$7QK}9q-E&nQRw$JtG|29#_bHG+uNK?^`U+HH5LuuM)!zBzn!+%W~ zAI8~dN>UNlH`2Wve8XxD(~6#Od3+HUl{lJ~ybP%P^3r!wzXH`|RpLyLrLdQ2JMOx4 zv{$N>`bN4}q^`y#S_>ccHettuuAGA>$J))~SCYO`zU9&E9A4p-P1$kFN>X{y`bN4} zq3(frS@p@`Ay<1})iCFdf1h=Q^3$*X8`Z_X8u=W>*M1SMZs=78soB)8C9dSP_HUwI zBU5ok_xw`Z)>VaTu7z4lT%p$fP1I|V%9@R4JnUm6N6kips(LTZRP3AcktciR;St6p zQrOSb9Z#>8xI(J^oAR+gscg70w+;t0!u1a@-2bEPIs+rCt#0U{6Cm`mw9sc^>5v33 zUYlLmV%9cGAI39dzuC1tMq}^V^bQH6Kzi@J2ht0qLJFk!-h1!;yXQ!{?`hhTG2ah_ zrCFVGrKfUrbtOd%+&)*p;}c1+I|#ND0%x))i3{7~m7+&}=wL|l?IFz5%@b6JO(OIGfQH=w4XRO^#edvhHwP-oiu@1jbK9Ny>FvX9U3SFiE-lY-!2|!05fHhtoojqK^Bl>{N6JXmULLUKW40m$0-W;j85yPDx z%6Cmv;zT?Oh^T8ZWi)77JX&ERKBmtV2<#I{a102rRp;7EuiW%mmxqhQ1bD16^~&#g z<4;HPR*Fc%0tit@Bb5=eSScdvNHPTFkT?;K10p`C^_eioD`>XbEHPH~blM5sDb$P$7hg}5gJ7ad8wudyaG zrzmnnM{=10Y?ns#Q-O|px1KlVGzE{S_pIlRt9P43=+gm>WR zPb9$;J;AV;o?!7dPnr(_d?G6~IjWT%k3AF#o(uwfQh6&;wAV8gJ>ruxO90Ij;`RVH zV~t)W2M}b7ND8o2DHddnRzT971v-2P6mFT6*z_t*#E0luPkox&hGb)3#lxyWV+9y!ugV5gwdE ztc4KhPjXGHb&(oW%835ta|K_{Cz4Bp(jbE0N$k${d}Oq*TZQG z&Q;m0Q<9#}^`=Xej;ex#Bh>|d%_6L$p zf(-RFNZ0KZYfN2H&#|BEb|0V8&ZkFJG`~<=TPXLA(8Vh4P#uzjn|n z{8%U^WLd_WDY>|}We2ngn5+h@n48@I><)-Dlrv&ir}GNmqLFACM3_f5RZAO;oO7K~ ziiml{WeRG~r4fB2(CIScea_7$#g9-gQup$#YNV;;m<0zukPZ>{7GyK0OkLKsnyI_m z$9t7YcO~dhVe(1wyqCF3X(B4jWeO_Hr4jvVk1kuBay_UVUziWN%M_ryG@@S%bo5N; zb2(L-td5)Olp>;MW^)ES*&j%DJ;ipEy@%zk7S>sbOo|WguWHfICiWJQ4SMi zS240vVs2BuE}In!(7h$Mpi_+7AqIv~f3cl;igHE_qdr&AB77nVo(cl&d6z5Z4uy`` z^DZ~-{8^uZI{}AYc$Pd5Bs!ycnqo%uLaBdb3Z!*F6!z&rq4ylj|fC`pe{=M4Fs zTbP3*)iZrT%A*rB-Q1-d5pAn=E_N`)z8lz>MC`07Cv@bv*R*+-GW6${BFGw4Snvz^ zo((>$yNKaS)sE5jxN1}(mNFgJ>arV*zH-2P}cDxwSaQ;{&`(sP{_@J$MiQ-4B ztFf_G!UY!-p-!3sWSzVeOz3h`8^*9nVP2*z5naw@3cjsNBl^pMj@d%|Va+QPIbybu z3<0XdiTFw&qBZ&_ugt3yHlj89TmiOEB*CjekQpUbmCDPj<5T7}O4B3kk9d@DaHM)I 
zsOaR>kg@J{N)Y4ZBsqdItd|6Wz8;{cAb(KYyg`8@D#+&wz#@gP$o?!$6Gyz96u0i8Qk*U zrW_F+p>@bsAmYCr_~>Tk0r5W;TmCx~J>oW)>|F}pgkW(9{!TCaRJ~D~<$QCG!ben) zW(z7vQ^|2JIQEeOs6vuzG+1&L#V_SuO8V&8-?$1Da0>Bm2!UF3x1O8#C}YGo@>iv2 z8=Olc`g?)CtDq0cU^89~mWA(Aq5=5_IlaHZ{2f<@t?;COKj<;$+bP&?Hh(~QyLfJT z?KjOBe8@D}+6q1hB20s&aq}U?kC+DK*}ZJtm8?h1hXI#4fv~DNInyxrDR4vs?5tD~ zjMZ~LXkg*kR2>;H^-Z$mHXl)v9{C~XtEBmYQl!bG`6y^IOZ4+9^-(#d`!Qu&kTPP? zNc3?K;d9&0ZA~_xP@0G-K9eyhQ6`;CpY%+ky2Ct(7rM*?^I^(l445+MWO@)x(1=Qt zb2x7FDJ6-}h-{t!-zE|I(||@R%iir`KBEK?pT+0Qwqc)!-bS9!f(IJQsLc5fDSX6f z+9oAY@)Pi30AiBIZS&@HiWo7;r#IERK_3YO{X9TX8};ws(Y3kn`F|7LRrm7e{9 zWM2dsMqTH!V!ot25u>j6bII1aOC$Q1fle(tsg8>${8tn_qRljzClKEz5&ElMaCZx( z`I=%!2=1@0%nI((i2ik;!$RfTki9;sVZNa(5uI2wXU;Ea8f~n0(EcXaP))XXn{O#e zL^atwK{eSV7UbK2hD)VefAxxoT)$k3!kd`Pca*4K{z=Xl+j$@lKuP;u(4wae)n?82 zlqF(qFkK7xxCDTR-v=Tlwp!i%Kmj8rwkAt)txmWf0*+I`*(;V04GlO8@sAWarb5PQ zsIoTp;odAL`F;$(4n`xYSa@UQC(8aP8jU$*cAi1~XjvD|?B3GcbQriUlcs1S8F#L4&oXADX!N+_!Ps{6(oE2HDOH(&7pr>HZ2j z4j^UoHzkM}K*BTZHS7NOcfg@NHYO{HyiV{Bg^XyAHc#*YY!W5?C!jN(bVYfxVg9A~ z3+kl6BFVo&l3E%kWOp5&H~&$ZhylXav{-@06%D?I` z7is1=i#d@={}}lyP7`4l-%R562uIB4%3S>8$0a5adUHUd9s8+$V`;pGiy+JvO48f8 zNPdCmb7xf&pXL$CwE3A2sj zM+`77Q_vz@8qpsMbS(E=&tEDh#-f~0*-S8mBNu`C(z(iG_Jr?Y$uqVl_TO)xJ&_oOC$O& zKu2S$OaJ#L5g(@v5#w(rW5AM0C)2KAf;+SJ;exCP8rTWiP5Jsd*P3SQ-t`?ODR&1Y zrrvU6qj|iNL};OsAt;Q*iMR(4u`X~eR~+xHi|3w-9%0Mr+KQ?gv~pS$7fI{)!yaWNoGp zJ98u20YJyhYj1`(2P(Myk=N(4x5>Mq+$K@*MS#ZsfZV?-_oB-)X!cU6gOp-=F-UICjAYvH-S7mdgLdIy$k|V$b1jRfGpr~#b%j#3+ zXoZzO@*0oL69C&JLLUQY)POu!W{y?dh#JuM&0LF=I1vj#gd%R$DeIML7AtgwB`BLS z5Ip+>$&T}6ZfF}6D?!B^uVf3;`T<6oC7{XJVEZ=mM9T@v6JhM@Jbqd<5}gPl3~tsd z=OiVF7~C@NAGuG$6ylx?+&u)>)l+X>ZkVl)FD=7CRW`~_QKE>kqH{@_U`c%{s4*dw z%$RKqrzu&)gy3@p?Z79J;B*k63FGCfGZZ_b2}_0m)<1pqM2{$!DVitE2T^ZdbYDQ? 
zl<~f2Z@Y1Ptm*dLBIUTWDGbm>10|9CJai`a!KHzGDHkYk|98r zI1z_{2;X);ggeUuu`RkH;}(o}L>01gaS zT*`@hbD=Us3|B5wfbY_Xei6|3%9cRg1Y{93Di}h$f>gdHSW>SCHHO*Vx*X0K5dR_>W;ZBb#4yXFKG+d#S`uXd$uth8jMaaIw9AH> zP^ySl*?BZ17-=R!1KXc75u^U2FSuMDl%%sD#SlA#x9H}SCSr(1j-W6|An264Vs_t0lP%TO1d!Q-gghMZ>{(PeiJ6;}enFdGz#_@b zAc0aW9=mG>%`FNaqZF6qq2l1<5rnw60vF34{}PUy+iz2XhzTOh8!&}Gk@9v>qP}FD zho?GSXrH2daTA2k7?5QE$@Ek(L1!t~>lX(GlUpDQSk zPb9(JAlO?7(jmJ%H@2xZnXZMOrHrfO-*uQ^HG7U;9%563XF~)u4Ow-H?*o*;=O|@F z)6i@|DK(WG&jrUpVKLnjL7r5emTS3+jk2jt$$I>G%00aB;0s%CVsF9#|xKIY^+}iuVbI$;uGitzMxlU5sr0 z`$*ejP6!{ACJRZjSAq;Gl=~*dS0hI5elfw6i~P{qQ4&KO$unuSOxSAO7tiwpbnIz zZ}deH!=v2IId0yhOc6zLnSvs@G@`#5=+HKJKd*UJJ?GGo@6BO_i9CAJIE;%m1A-(fgFW%L3LnAD`Xk4T|r7Qe+ap zAH-OiUoj&V%9{CrvdJHvloQWp^8_q5iO?SeG&UEtY6{YT&4-ksi$O}WbgxapBh`mN z1v`eE1*;gvFSfML{6HBN^u?Ci<>U`R zkXh~FN=x%2rHGhyI#(62Sc)Hm1U*(%c`i_Xq685=)@2GR$)yqfr$Eov(Q@-M#a~z* zEhoXxL2#gmuOn5eAtDLG2P_`u7fL%Q{~80p>5CW3)kN~7^W1bIcL>zY*kT3guGaY7 zR(lVpW1u*{ggBcEf%w?`N*Ryq%;-l66JAc?JZgRoIE>wHb-YfFH_Cxu^BaZk$%pRT z2pk+G|1GG{*i3z-?(TFnzf+zrbA%Ql%^0+Y3?P|)4<>9(r+z9k)1$>xi}Ff``GYd{ zMUCQ86@4UZvJTGUzGdNx4JnFiu6~A zw3n2-*(@gR0yx6bFn?1?x^xWxjhj)jI!^O%o`G`w9dbZ5Z_G`M$omxkP*(Y)Q+M>N zHc!x~Y!adW325kPcvji`OMxTww9G@y?%fW7An3mV3O}0iSk3%LF(dqFBtsyn#EJM{ zAYz~$+EAN>|IhyvHDaKhwwVHC2SoJ8hy!$XpgnKeY$i$e)Xsti+ViYtw>bz(T99;7iQ5^Feo+0(6%~^zDF-vCPd1vV~%{SAq!Lrt^#-P^8!a z6q^afI@^LGK6C3isda?f5l|Twpnt;bq{1$!W%k=QSa${iTINLMB(sY`M~EyLf=ZS+ zMSdI*(Kwtz&+MwG5sgEwfSH5rE{*8B0i8o3UmG;LE4T~@xkDlI1XA0l2MPUn4{hJY zF?-Ah+U5zMZ4#mP^w4rq432Q_H6Li3CxEs|gx(v_Xd%;bKT&l=t~EFND0;-ytl0tr zO(n;^;D8BYQf`PVoBb3&!UVxA0X9>JyT8g_9hX$hSO+L}jO;#JfIsb1$#Eb!up#5_ zpe!}YwVGL^G!f>4Y|elw`vb`i0vRewW~j-!IapaDDvDVGc%~5d5RZ#zNGs;h`QS23 zfXfu(9tK=!-1UZef`Ue9+)aK7bvy7K4mfoE3E37(WOL&P#f<3sE>j??OQWzy0v#q- z|AeAF#&nc&^x(JlYZI>w`K(#h%cl{JOh<#MgWnbATXT$Zb}?J)%sh|v5b2KvJ*KE) zadbv3tJPxB6qGTdlxe=8GSg(zEC$W~LZgD}>d$X;quP#B=JPt&uR_F?HG@x4j)y21 z)oqV5OO!KWRCm2F`$=sQp-%vGW(Mt@FefVdf@aWOJAhdN=33nFuC`E(@>2n1PK9K}VK@ggqd6&RhO4FrTMSc$elB^eGn6%^(uhN`ZrW_HI 
z7PAE9UpUz^(#Y{wTU0Gn=%JnO}~-#Y_Os`mZs#WNJI9G zWS@GuGDdu!R$V*;MUEBV$c`yzn{$+5VPneKBsjO3px>-~lnDAsunGhRNG)k&@qNDF z$7bH|wpv-&%0KQfxhh7~01yyRm^Bb4T+3D`W>cTC0TrT4dx;bx3rzC!K#m0v{6p6& zQ^W#DG6bzr;zS$-B9yApaWkZV5lU6d)o+9w2HZXZ*K8sZkIzZ#hH1`Mnl1)0-=msw zvJ+?XZz%_5SqE9rx%$MI@85m7K*_rdN}jwb8O4P|mUN+KYX*=^7lMhEIAJbQf|yF2 z2tLZFoun@Y9H!1*x`vud6f$DMv0kVFtc9VZmjW6}ujs_wN4-q3Ba~j3DFAnAL@xrp zgNdGA7UBv+PsGcWwTmS(vXiKZKFdIHN)X47>3Gl5IRcpxrSH;5eIvlQoyi;&zUeoz zmcfcF}}@Yeai76=jS#$L-%M52!L|q^W`idUv{EU`CZCV&>><8j{TdQjCEj zvxz4zZ#dH3Fp2UkXcN!lk*o$X3~f`JDrUVBL=0_`A*d;d6LA9&(bnaaB)UwGD{4er zcbP(D3q;)>+%45V;OySh;ayyqQwm2)!9O0*R48 z&?`JB&XE~2AJDc((FocAD4M!!gZ+51aayq#v~3A2lFWbvpD`)Pli0Gx+o%K)pV4Is zD#fJ{eG|}OadAFr_MG{wQbbr>vN;2u><=WH^F`TEo!eA56?3KXL=?qk3MgC}(XRqJ z6g^&@zpidJn5z{(W-)<_^F^!;0fT~(;~H>arkj~D*D83#OgA$Vpw8F>C)WXPUxCxH zUNt+r<7%cR%=OB%M*iiaqt+=pOLYUp$P9FFgYK)yOS?BJ^@0XEk4LhbK!$-^`k^TN zQ>Hp)ZdRV2&PDWtN189F(lnVgw}1wFuW8jvspS41+$?pg@}1q8PcwGclogeF)wPh9TVJX1%EHR>Fv3NV5gsN>j=4EO21d&P+TF^X&Q1bpBkJd8~=}IiSHo z7JuQOb%-olZ%Ff8<%@7e^AOnuj86+-S-|Ik8QO*Y+~)a86|n?p(?o0%p+hP+}?-0)$jeWj8_40%3R zK;aWf@G4IrE*5eDp6G6`o)3Y~6%hDD61)ZkY{hfs@eyNQtMDnE5u zD{91&0XYI#heGJL02*Vi-NqHQ!n{?%BgS0K7BFZkIo<}2%mmphrtr5bNkpA@9&rVV z6z>29l!vmJlglgKspt{P1G5C;GljVK0CyL`^#iV+2)b7(dgbSwZmYR7+x?p6&KQyJ zUEmAHCwVp^Vpi*5((SX{uN>#(H?F440a?>;Wc>(OGu?67_?Y>q5-zAa3KS_m1`2v3_;;(| z5%zH<>B%pZKam|2M-#n&*e5`wDkEPD{iL!)3|~|Zku9LnR2Jp|aKNh4fPv{jg^#eR zAV+|V1cH7Fp!*4^YRgUQS6uIGKCNT}ooh@Hy05mpzHJwRmO;J z!z@Ahm_po#fXii9$%uFLSi?N5&=GBHlgkyL`$Q6a4g~ltW8?J^^LYi2_$40vBw$cVAS<_YAqNre6ipgH4>)uu0blk9lKl*1T&C1{F6QS-6Vbn? 
zm?t2xNre6dpfR&amCBpx7(ITe=n*rUW(x>3l^nnF9B8z5E%fX8aA>xGLsQA|8*pR| zF0%4%nBUHarnCCr4=~dF4m6kzcz@jQ6+U7%K#rgQNFeAR02+=bP-cth{^pO$vGDOk zQAztJ(4tjUMzG;v{;W(9t;*&J%43rV{TD!^ni4s3X#T3e5!Hko0W1;-`Zs}@;W#oi zcY1lFxW@clQm!%o06Yyw19LEq6^BIAGXGT4E-T{lv8AcnV$`#x%m9$`Ujo8bB|nVU zOVbwm-wNHOJPsPU6k8z({vUv2;2M~d^G5KjDOwM!|0;f$;nCvDs|j;SZ7lH=0T5aK z2Nqb<2E@gn>`L$$0nYWNw+t1@tDGTtOBYwJSlwqfSLhS){o6Z`T4*^A zr_d<^v9|y=T($=0>@REkhKAR!Hd`wCN%Kdy7{uSI8NavKyL9DBv-LvoEe7$o0Y1EI z2j*7sIFD^Pk5%|CQ>v{Td+y)YdF98nBWnoCMCn)S_glFe0{$7foJ5-NLCjTWAfXI6T*|ygexK>PVUbN`@D0&wo zrtK5uaoGv*@Bs=b_63FQ)SR1Lr`t~na=$>!a7_)Lqdpn-PZ`c)o3KSVKpAp}jj$*4<6p0Q2k?ZB0jX0hksw9tKHUa{%4+HkDQf<9!iv7bf5%m?9 ztvG+hAo|}El%dPURJ$R!6^mSlgUeQ!{WgPD*G-O4rrcH_SJV416M~T9NKn{L2vx~` zj-wPkw-e$`4G*sXL_QkGZrZELQTB?(K1Q)~C6^sp$>z~lr69yV7Wf#UiZV>Du1{76 zr_GF<5#+wXdYP}NT9OcnEQ`Tn>!-IW*Ar5oJWeTc>!&Q4dzxU#iVOFCn|gw6O-%KliG58cEX+nSXU=< z)ZL1Gvf}2~iDdFJ_j&*#pOPZ4tWS&j2_UZGKAbE$VD)o~Ru1M~?c^q6BAv$n+#I!F=M{ym_*s7xK_2rH}Z^VTU2& znLxB-jMc7ti%!4pQRv*svQ?Y#DCAfQ4m%(BN}o$ryR(!+{>Z7DCe%O7eA-0U>@L@NI88yEu(hU1c6-SCnW>u93Ypuq14v{h;u;{@W;bAP3TWD?8v}}*t8P?qBciy0r)!D;iah5D z+zh887$Y#c*qO_$l^?D#gJ83s=JKE@O+R>9lz)tx`UL(EC3RxT45?fzJ69siawRjj zSTSe})tLSfKvT|Pfw#6LPmh0Gn66jkaMJn8-LoKW3rD_n;Dbk!%A|ix);F08lriFT zgXZi|S^*^Ih2Y$~EvKwUMqnwwNO}7gR8oFG2TJaX!OflYZ2j_|**d;NnPW`C9;8Ew z2}#~d+m^Y21|WXBwMjWHe3>#Ytjt+Zau>mkqpnzb(}Au~PQ(F-|FFpQIDB<3SN?PJ z>pX>G(bH%fWC~G&5C^yY=rhyhpd*+Om1Ir6Bzdi_8J*IUAm zu?7Tjl9IPPDk{rBzARZZ)=M^_9Xy4pLKu7>EsFk;Z6Bj5NyPq$B6Mh(8DPpV1{wBh zD}z)ZhJr-t`|>LykKSUExd!Ha+A_;rKZUMo)+=`xqk8P1>ru(R0qk@b_gxvHvuC7Z z{{pYx`(`LG~*IhYMx5L04ER5){NfDO0;qj<{_l9&#m7D;UWdAlqFkPy`y1{gM@H zaA{(}T0qRS(sfx9wkc8z61ir;g(*gNwhH~FnepkuC_*e~%tj@QXj3VSFQMIhYepsc zCXi!}k&E@YS8-8oYQ^Meovs}6_L-Sg>If$>4?6;bZNsO}C1eB{mu2unJ z)Cz||`u2b%y#}Nl!d!-B_3{uN&C+sTtF$q5dpp`@ND^P?iFtmMUwoCcvKlI=AgboH zxn7AQruGztB~GIuD5-A%_2E*{=yTR5+uaFUkQC>)ZF^WzuA!@#8Q{7)a;6dTDv zv2KD`nCy{BZq2Dy3w`5NnO&!cGJhAQBNq-yYk99TE>4Wvo7$bGHQaa0OambRf 
z_;!`(!u(2#{%kEg6!9q#5r=LEd12I`43$zz7^~f%m>BDFv&2(Xs0h`_UNvP8I@tLr z2cELr0a-9TPnQfk)=;6Adugn$wRg~uGD#mdBHwpnMOqkS`gF-z6LLDs>;hn?$Lb%y@ZS{&78TOW22jape z)SG#x3U^t)a3gK*Ii3d@rMwGLq7^kySu*pBy??qWjfzLd?pDbnmK`3973L#;GHgPo z7|((jhlv>eW<1o8owdYBV4sqfc^Pjbt(Pr}XR9z>uKV#bqlct0KGGGRB0UEpVRCHD zjL4y2$WR!-k>5gNT2_MQxhg}1qMgB@7@2qmlf2IZFD#(tiAteY{+p;Z%B~Zc=PPN1 z`UL>8wLv1)3qXZ#+qhy}mWC%38l;5zXRwbz^nhyj-P-Q2jh!$4U>uDae-q(Q_%B=qe zGFQv1_g0;(Z<6O5Uq2sWj}m^#CP)&$0mN82m2KUKg+){B<=`F z<~M;EmT9#29M!0|K0Y%s>6D{4t2`G=?wsY!e3hdt6UBTB#N_auz;i;b5RgzzjL>dz zDe_xYnx1@VY;OBi+QO0YZJ@*+L);{6l`NfL->w3Us^OP+INAry@_r5 z%9_{*%)6C0V&=~?u~|$qzX#0nU&b=9tm-Tw(0m5!vOP5?>W6{H;bcS3BTtL7Ol?}O zDk@gwIX2OZ-m9`D`K>5564-vPFH+8zmg2t;;$zd$s~d6&wyB%lKwu>A0YHz$q)<23>WHf8kwxi<1_3M_p1aE#%GU3 z34EkIFopOCgutBBByB^~D0*%+%tw_!qHB1Vb~TV8I9Q;VG|H}9sm#Y z{>*BZ=14pO{h%^MD8n9rWIiHmXOBj@Pk|1-l}|1)#g z2=PAyd^mU6xzmO986!_Oe^yDlY#Mu#4s)+u1|m?TdI(h59F+qY)yXAxm3D&cp2}78 z;}!F;vh~Qn$y>#*u{Tgl??0E zaktm;Ri%trHrUC{E>GHkk?m_>qXFD+E5T@1sg6&VWZ6AwzOIB3W$ZB12Z&_-23U`h zGN$uyy9sw$9oIjIVlT73`KC&8L4G4naaq}EG>?YjealyhTL%<-mzAVGiX%m6okjC) z6(yo|w^>&-;gRz@;KcOW)B*a%^o&If=6my9C61WgJy5%Dl|?1}_dpMwPpDRvrpw8i zk@a}}0lNZYram)ezOUR71F(PUrxO}Ef8dLZ5lIHSf;9>@rs|Wj-6kgp8**XO50y5e z$R3DA_K`NIr2mmGvacvVAbyp}@ z%cwJlx@AhEJ~}PuhO6eMDojNCS7e-H)kp*lYvyMV@E{Q|?Ki#DjA1jvg)Dd(q$cK) ziut)p72yGq!Zr2E4(Jr-7Z3*8M>rW_0RE+tN6exwJDFRc$n+~P!Csxp*?G8FS(EJ! 
zafbP|$`G;PkpeeWKrRmj{0#&|CrxpD&54DyB-iTwR;7v9ze`b?qzOSu{X0!n~7VE%7T*lU%sp|iHeo5AUXy9XLml$zm+zkZXJT9Y=cI=|A6mMDQVk9^(?N_ zuU1^&X_)^if6Qi%U;I;o7<@|fKS-3dO_&+kZ8MKK!ynjuB(@1+5PmbkX+JbMi=)|G znIgVz-q7SgNxcQA;gwdMEmy5!j1y^}kIIu?JPN6O#4oZvB#CzeF;<~?xe~`O3;G9J z)9y+eu?h`eu~Z=a1BhgOyf3a!U;0O*DpwL#%pS@cVa(IaEU(7&4~t3WJw0QY`Ir}MHgaU7KS~5IvR>(q;YJX5cfu}2)b6JqHJ7S~OU467J+*4L4RVva3 z8|DC&p_pIfW?)LwoY;kpVjc)Fu?=qDKa!>WtZdm<=xaJsmz_E>cuaBRmV>h@-Ku=) z93F+r$EGX?L6&0$8d_qfz05xBEoAo>&A}>G#HuHHC$oizA|3(}F(zi`IQVDEE~c2M z_^{8RDo@Phn$`dP3>N~D_%INotJovLHWaUr32p&CK`A3laW*eMOgk{r9S*wVQO>Tr zP;P1}mKsH=0$KLSS)?OWmWVc-!rO*2Up5MRB!tDP+>Kk#N-4Lpo1;{ih)!*VX=kGp zC^8)lChWO1&mh=5k!wk0H})81j2Nap3=8igepXUUQXdOyXqL@Owz3>N#2b!9YL*41 zkJxfjDDrE}tpx*M%CHzRQ1P*D4n^E`F(A(GLd_I&oYF^3j%^gn7DVzM@5^qz{bYNo zA#W)cR=NLIs-vRvHOvwvkFfuQP^^G3sR^BeoB%*`FM>*3pJJQRP@6|1@Xf zQY4C?u{OAr;3P_4r5&CaL>C4NrsiW0Tn9N)tGS*suH%UgaScr3jP!(C6VP%GnU* zG!Z48v)fsUp)iU+3^aq&PCltPGbswcnCE%1`Er$VEdRTuG?Xxn&XbkGuYmBFxgee> z;X_qfXyDA6Xi)saoTIWu%v=tIf^|Y9@44W`tljoAi-khmg~VB6rIJU?7#^x~^YOS8 zVHHF`6T+esmzxz(|5W1vnYWQuj zDM$%|Kob&=fwk4K>a6oW7#%N(?S4er<&V5>m+?JlK_XWfT-XZ1YEP_4o3Qq8n2M6f zR!JU7=#n1*k)sL@?B$8?PR2Gq!(o3^nIcRF4uCb;28?WDV8iBvpQmu7gLYJRrXx|t zh+g7BNa-U@h@`E77D`&v{;jwPlZy}?tX>NDP`M(!+~ zE{=ko78l!P7Z(fn3i@MP6FIHcR;vVSh;G{oKFdX+Ll zXY`0cAMXZFfo_06yNE!6yE@i5H!3;S)H&l{2XA%3A)s+*!*RW?1-Ft@9m zkE*#e%R$ke0?{y3HJdkSEq$s=(UV_W9)WGsM~0lWRE?!~fDpEb=8Xn%U=d4?Tph^` zj60P%!WQA7$m}D210%p?nV$v`I9&3U1i3t_yg{x8fTCiauDlTq)nz2D`#Xb0u4jM? 
zb`O1lsIW?W%7%D&wILS|*JkCiswwkK<&E&gNwG+sM%zJCjJqHPh9oRDHyJtFj3w+k z`9sQFsL1tw#dOVU?p9&Wi}>sw(SCOy>w-^-o&|}x<%>lyFFkLxT;t`=A3s~=S(`6U z_TDavluc;9$jcH?pyxm!Y?TjoxH}_MBJC9OT$L=s25j3#pgG1tq;n}}-?&xfp7wRGUb!ob=!Lu-21tSp}2x0W}hzh1c`_TN1gxqYNP zFok#ngg`|%Z`I3XuG8j?N*z&bMIg7v+}1zv$oM8O<{l5h-85DDAdJ^0^rr4rm~o4va&3;>cVXVDGDU1}craFuk90t$7;l3ZR5oyj zS;pJT-IpVdsB%-rqqw#%lIxmTGZwy!-BseL4UWj%$Z0PpgJ_6s#{f2mDY zWIxQjTd53cv*b9rd_vjb64hLMJOuM$U0)_wpYv5y3!5kX1*K9EBt z308f`WZtiw5vGTZi#|Y+=>uSbg4W$E12d~^{Z@D`%Y0B}xRA|adg}DWi={3bg{e7t z{$R3DH)D&v*L4calBdyj=AVFv0)7YrLT_mv$c8Q)=v!9w*Uo%cMT!`l6^asR%#Dx% zFlD$8G8`l_z)Ib5hqH`ZH1AhAA~rDM_cwt}Q9c4u=>MPb#Fs-YvgdonOkFzSM^%6r zYf(EF#S9uLKL*MtNa;K2@Xp0m#^Z_F_?-E;O0X`!b;PUhAqyq@1SIp{MGP+;uT_*A z1XQSt^M%4^ws5kmg3BM`Z&kDp`#u1{@Rgc|RdL&y#ZANJK@}%r$m%jqdq7slr@#u` z2_CHSpF6)+vFA0+r*6N4)}Hreny)Ko z#0cGC^FYx^_ze(pgVcW!9%?#gF0ea-0TosS&tzBar32>2N*PG?C~fAaDoTW{RS_wR#+)|T4xbYJ3=-k9Yj1J=W77Ow>7OjW=QT3lXKhvZm}5RaUH0B1L|J6@67R5Vp__AL4nYJXHgBJ`4Z=Vo~nnmD^hy z#iDtP7;Ef<)iF9>a1U=Q8re1zL@qI#C#;IS$jlGpm?Ch9YdqS;c(Bes#+3~Fsr$-fo&G08M3;MRMtJ}Cx>t(82&8PLP9 z;6CD%`V^DY+ko1yul<2eT3w*no5w0`MAr@0*DYA2+ZJ@(QP13jOS*>HPPycdJPo76 z8p1-6YI{(57k()5qT;2;VB~608JbGtgEJ#=Z`nb)BV73X7{TiGkuK;IXGe$w^GUnA zR>an9c2W^y`fz%!yzRzmTrY4DP%`ffW*i37UWcO)(t&qTM)@PR!rOOX3q_vCfoESq zk}7F^IY(MQM&@0WGs0aX1@cwh36}!w1_9u`l({p!k>HT;?#dd`j6?tAW-Ky29!&U{ z{_AtP$Q=3FLkT0iSUrd@q`h7qb6Sc>+C4#wFUiLZWiW<@i=lWgrISDA_a&R4$g?+i z&`Yz0m+vRDgV{%^<~{1SqLFQ1uwjC0tAoll=yI|Ce##m#_9_l5RAX7qmZ)UkAM6JU zd!}Cw4lP|fG(50kb^n^erop7XX`SrK$n$6is1y-Punb;v7wv#gc@Bg;7?3k6K|gej zN_Avr%q&t7A~b6cL4F?*6(hwXbEwiql+Z)iFP&=1DIO^g110ow-_?5UUGCPR@dRb;k$;l2wDkyN^pPwa z2@eM$#==qgbQvFex6_8yhB-ocBO0{_BCn5xTWv%o{gI%D8C%cPW)5i2sFUg7jCgD} zWC$@wsVGDFRiL1hMPuD5DA&ZURTgukC2psVjT;y zc9ViSeZ!!4x7qm>Dq|1oi@-Dh_g~us##9Zm2_)UC7`g$ua zMK~TJV2BSqeeL@pOO#i%|GfUt!Pl2aWI6#%u?|bn63Wx7>&xnLLBfeDM})B_-GgNf zq|tU;vescKOGZ&of~c6*y(Q1@p~DAzqA_MpR*52}bq_*$e547Hw5Na;TYjm~{W3uG z@+cS{^^-e=OLOD(QpKFA0z~MkDO^*PbU>#tr$LzgL>Os9Z7d4zO2yMvjtIMGisTz- 
zM_`I@21LNcI&qdkjHxn=m?x@CmrLH9Vf>NX1|1zGeG(*vC8D{*VJN0;{>ds-#ER9U z_-X=*+88_}iO&Qv2Kctxjr7TJadFCe>h`D{5d*w}QV@-06{uDU%C!`7VWM?j8n|(V zdnK%lvs8!(-LZ|IM}S7YUhqMC3ud8SQH$jjP~QHm2Pw=lWsm4PipMI@80NUP=#-`p z(!dZFUXsh3T=7>Or<8vhK&XJyU0jUVtAFF)xTWn zBh=R{9A(HRw8f`5DPU8|EC9CBn6;gHLWNEJZjMB49QT9E~io zS*g4c+Zb({AicTqK(Ca7Y^%VAZ!WKRi0fW`TpWf<2Cu3VkYZLVVZ;ECVzA27Xcm&R zYd{N=QFcGJ$}6oZ&Ym)$%n_4S2YX6@NaFKA9BdI`uSgW>IkQ#?BkERnm{?Mcc`$Mf zf(v?*{)`%euO zlvmaj&e9=U!}1hGecbL}${@l$(F>G0!XqmMW0BKnTWkt)Ap|*51W8qhc4}0^deF(m z3&Fi97pZh30+BNjrwo)Xjm8sFewoAc= zdCp(3giR+?+EkmIlDn8Mn-5vbElXJ_k`+OQA*SgT)k$d!DBI;q7qLL^aF43PB3}u7 z1>w^LRl8Lhe&oYFMQl#u?B0mVaZ!F9D|YT;t%Zm}mLVi{b-%l;JzG4N#bi`bX(9$* z%^QvinoLOYRz0uxE|7aQZ08i$G`zYns=N`yt_LBlk2E2Yb_}$7@q)fbn<&}o`Bfe+ zPN`gaAac~efp*rsS0X;?7-!cjSwzVcgGJMr+kLT!WZeMP{e(48vew$CxuC?2%Gq)= zuG|r8VMVgl>Tmtd7$`h0C76H&XiCje%4?z}IqhU7l{}&;DM;IbJ4o`@!Ha$tJLoxA zE5uL*^u02<4#YMKdH{u#&wTb?&IioKFnmF)Y}8BTb>89 zf}~cnH|0zK4O>z?2bp-w?DFtbVgi2y{V~@nb;J;vc6*Yi(F`IJMF0w@X-uZT=J57GhnF-pR<;@&xehOGH&IA@=sYs}pPgNrM zBaf(qN!UWMM0bD-)!4ei<+Z?fDqX~cn_{pc)2LrG1Cq2)18o~Wi%M;D)V)FZbY)(k zpGAsE>Sur&`dj)9@jB@oeyT`IUv-Mf{3^0Xw4uoiDgM`Lw zo~zUgv_S%me9r^lBB}b!j&F8{HyNC+tl+xv`N|zJIA_po$x|1Pw&;}Q1-|T|vfxfJ zy-@ih%HCO9uxR9a5%@482aSUkQmAR}#vAiuC64&s3f0=`nt>_9OS}x;e14JJ^%93b z`uOlJUh`6wAwnzgP!z#O+Tv1#mq7$*1zxlBD&hp}JLcs|9bxzN5Ty2z0FRWf040Xd z7PY~8CBITRBZklvgRE)PX(EIq?W;h0ywJw&&4})#7cstCC8|jNoXP5u@6Ti?De-F{ zaf`3csGN13?&TA&uT{AsbhgfOc8W;a*Mas>DJXJuKF4Q0e9Y@rhRgEnCPkd*`6)|A z@!kOOFcmg8ZkRlZY_)GxnIfh_57V~8C?=`j1nQQp=jj=_m(RReIUo7fGm*&j7BFEk zGuYw!6e9;rWil&DIO85fc&kcqasHREV`R)lEv-zH@NJL~8;DKYC1}LhvuT*Ot2_}J zW#^3&1tje|K#M-n($nQ=mH54ycPeYdw@*RZj~*72#P@(Wt3NN5bEpMbMU=)@<6hi* zl{ccaZTn&fNYZzKG%(hRH&?l6oh{6}l~Vr58un$7jU#%zRi$<&V6E&|!NDe&!Rh*xxnF4)rjfTGQ;Ls33JfsW%dILH_V^IJEYLry{1H2` zSrpgV(@VLsuq6K&$kFkd)fnEO_HiZd$**sZKvEy^S`pw#_z4hVr7VVo6>FfP6w2dr zT(n|7sbr_*lV!{h0SY-D00)LJ-5kJd>7MNVRfZ&DYkT^1znciECb8)EtCh=3B33rTTRmU3#(?P@tBBx4js zPlJX1*4wL*)nx$aje*%%PLzuDI*<)CyIz40HTTwMXRaqihSq6xLWa7=hl;CHO 
z02klkD|OnolQW)1M$>fT?&m7f1^Lwzy>Zt}LjivQ0nrt`p}-r{{6Rcxr2C~xv``~m zfJ+g6&&8WYxr6p=6(K?&Poemk(rCJarpOfHHxPpQIMWOIf}l)0<)m8PV1BFo zG3t;-A-O+`-!E>5c;(O|lJ|Gu#cIShX&DiMHwmj#wek9xY+(If*$48QM2MB?F`eKk z${!#KX41ev)7@nBN9B(g6Fd56W^OV16WE~T=;=ps!AR;A9Gm;Il0|4aq4!KH8q4+< zuq_g{Oowf@*7rfRF@II=M|UrP=#=GekfmjnErWxM0T{&1-&KT&mga|leq|rwT#zD? z_8*`{p(r7G)gOdFZ zlA&Yb5MTEEHiUT0ll*D_2+M463a|-@RGSGJ7tyW@W6a_?0q3T@xw1v zMmv$nv<;YGs}PF@{G6OML*H$Btg^`;dEG8!6={P-zHPw=WmoD_Ul~v84LQuUol->< z%w}hWwjhycdl2m}M859@9y?QVb!4q9oWV(7HjVfQNRQhFB^C_*oe-{ioUzHWYQ=a`H&(0!G>hd!pK9db|fO5kgmh&0fIsFiZ90!79Cqa?M zR#-YXRB&qaBBg-MIEMmxZ5t0jNN^AcFruf0@XtpctQ--(=k1E&fXH(Qc(7Hg&8}UG zmTjxC+N4~3hBwvBp(?=n`Gr*6+~ypRP`JY&+ z0EzYPBiO)ehNS?9LjcS*U6vJVX!tNks1y+mI|D~SGV$hBi-o5&M?xBmcyNKrTy8L4 z9i1-nwxXj{hzREsk3tE2#NBM5kR(1D#GFsu6|DX=bwg~{<*GSGX(NoHvIQ)i(Y1UZ z2?0rZtdh1IG@+@yprnt&BnCvXE(R;izg{b`W;dDbhw%E)aY`C7etHO2u#W_Iq&yy! zxD&hW04GhOfo(&v#=9YOiOMi2nR3P$#iS4#%MPI}43z2wNY%!EDm!f_ssIsFd51{^ z$=C@c*Gb^Q4AMc1Pviz1u`Jd%)T&F&$;uv~)TMwde;V!0t`f5N6zCKPgsXAwx2v!Qm# zrWet2J7X0>ZkFIlC;^t9x(@St1LkXau4r^ro}HIf_LEi6h>zK_AZrgw3C@HBu>S?G zMF)|MBckK>r~ol$sd+q14RlJf6q3No(B$%jom+F3(#s!tAF8vrlkNidf($cvyG^fR z@AAIhbNkksWy%_{OjIN`V2x!q(>yMV+Xo4@6$vo2;ox}H^ebJ2au%8$6@=Jl0~^|( zzr+hJV`A4_t^^Tl?T{cuBFhS}gs$tdr(w=fl892YQ~v~n_~!y2iMRk_~UT%s~W zXmN_jD$rQQw%&|Si7tgiPM3GB`>fhz$;{d7=Ppwb;;Ny2f5{+{v5c@+2rS8SK7NPY z3}g4K=D>22pu~+g{tS&bKwqhnB>9qzh%-2Z&93Pm5iRTIDoBngy?Om7)4DvwakWYo zVTe=+ej|;!1tkH=c?~!h31>SmBX_YZ-GR)tN*|%xrD)_&qp1s17M!A72T@>%wHr&) zHVZ@CVk(G*OpY~QuL8vMRptZ+1!xaR+8aO%`YnboQDum*WOzVI;3Hko zDb7t02NOaEvw3Ax|9DAS!_6wmD*4AbJtl)iVKVWoN#0>oj$0rHw%VH84qAh`RoNp< z#fm^~jk&f6JTl$}Mhu#*hwWvdWAA3UU3nw6LsA?Pr%@S!d6%AtCH+%Ck9Bx<7OYLe zIAWfvwBpT@TiNZWK?_ByJ3zI+AT|3fmSr}Slag|-PA(lWcPeYdobRDnR3GvFkO3}5 zcp5~&`m)oy+bhveS0N(WSqerm(x@yIGdIpTYzp!W2!c6UmVvSjH&$3F{}+?Ql&MdZ z#-ujPGgX3!e(zDp?<38SB)$v8_}=LzB)33|^1bg?$_TZmof*m_k?C1r!k%rmWmW5= z=GjVfn*5sAhub$R!6C_WK(fEUw0!&egxs$ycXzVmK36Fh+@`X)6ybRg0W}(2_Q3;O 
z=K0DSQKKDPkcdR47k~+hq-g1K@UXC05bH&47!sh%8Bs}_LGr41O_UX+$AxCIdB z`(CVM5udPw;vzWYc?o#-buzc?2#axWdZuAss)P}DLZL}@-w?K_q<@(&roVoW)2^%% zCXLd81NB z>>{+ZRtrLcH-P{>ve|Gd^BK&Dw_M-PHQN)1VVZH?rdEN&eY>-dp!8V zN**xjBBoIp;4;er!KDBng8-OcMNe8=9jnf+;meI5=tJ1Y*Mgnv<0q9nVvNr@O45Q(aUOs;SOmgtX0+Jo)pvnol% zwoZshal#~Tunb8k)|1vKQHk&zg1rt<val5qHlmG?=T&Qzm?)hwt5C{vzl+J1Q9EXoDoo(o*i%tO)M1eTM!T5 z-mhK*4`w)nd|PFS`1T%!AHYYNAxZol5JN@uUBjQ^!=C;YQ)mVN4obl(HrF#;C147Pg==KCr~F<*{HzWlXpl=BCW6D}9c zXMv?R(5Cc56)9raY4e7y9#?jNWc?9XG1fV&jQiifih;i3S;PJ2$4VNZYPrn(9PRPQ z`4e!WgT;Lj=@sH_;Ep91N9$&yG;MyWa!veyexG*SEW^)G1}ITF+3Fv-9^&UJUc|T3 zoUAyF=^p|}&R>8NEkyNcx6CUib$KYLVScHs5gk@R$f~grlDxkH?>@rYG^C5Ae{#;< zz58pWju@^z4w-$#%@Y|=lK%$eE!NKZbRwVTw@MnZ%Nq&1+X_a) zKY@j|9}M8bIABcOjc?W=ATL$ zQ@_Fc%uP__`4@PwU72b+R)=+Gl*JWg$!RB_Wd5zR5zF!vi-k?2?Vu^fe;@`byyf_T zY#rBSoZ_j5|EdrXrbv%O5qv~XHDq8Z!2cisl$Mt9%GoE(W2E!gm+d2#Z;C>8jrlRq zVUl?>!R6I86*1Sy+g0*b7%c_%0YkZ_dqhr`@G--N*<5AlvW!)n#U*IGqjm>fQ#+zl znk^s=rk|D_clz6GsRBf}C8iLpg)};3-2#9}(yc&xfRMKAecAm~@7uApvPW2`JQlfq z#2@OwUDA^cIeTNDS$M397oj>V^gv0-&1&2hC4i@%9&2oO zToOMP&CH}6%9GP^+o{YEi*d!xJuVrLP`K?O99;C8KLw1g#gUra2C#$564ApvOxw?^ zn55p(Q{&&l__=&&)ye@{j0%%=Q4_|^PV=GmFr@a87EDs_3~HJhP~2LuNVhlW@R756+onXg zxVuqo$Y%4j*+*F;O4~t$YX&9#zMwxy=-XTH+=Z8N7^-acQyB*HOCG|p+QTGoBg`;R zp#33G%l->4@-qjh01*b84l4ivk>^10piPSse-@ARRV(h+6?q=eEK;rrl`gXmXoVu% zL14ochg&H-=L#9;(54PnvX~Ck(YwN;k?#=j6@(8~lh6>K8j9p)uBwqOWqaGa-1g8g zhpHqO<$vR7i&QHSg**&G?kGI|EZc;s%K@z?s0yP?} zjGE>crHxpXY0gfx?j-iH;KWGXZhr}9-)2ogd3)sF<;j6Bu${W4>k?#yPB{>6xm)2d>}t?05u6tWLO9_GG!r@6lm zcJ^K#a#z_RHd&*me8s0oXG0{+!p*k`I#+<@DnrD^Lz_Xn-``4T@yNLXoES=5EIncy zm^SAqWyEZ#5NshD^QI#JlJi_}9w3~ZPTk5pj3d1(m3+1Q#a!-V7;a zgd&)NkTZ=s%bmp}@i2(d-LgwqYru!=u{mFP<&V7P*LDq?f{}0?2%!aXFybV%3McY8 zfR2kJxtwBG&Iljt6ozF@qfMwJzYyeyqqOzx6DOl3k$@M#SE~G5A2qy4MOhb7{0u0C ziDaQ@7YozO%<$F?Q~6`r6#sl{@%$aFjhV|NQf^EowaHnTAC7E_laQH|kUse$1ED0u1?9(?nP&dM}n*xjr^<9xtmR!2L2e{Nlq3`jmt>VfS8GyCe)0C!`l`0 zH?xudcxU{>SyY<%bITVpoA|G1C6q$hHO}@<#De_G^$_fW9hyWM-dc)0BU8zo{5ox} 
zl(00s3%?hRAJocLlB*>BQggM0!`nzxzz^mc{@WcUt5vyM+ck-6@h`SGzHt2TZoH4( zCgYm*DV%4$PJX_|T#tXWXNTF!+#tWMZVYc*$Bln-I-y~1M6Th-Qz$r86=$X}ZdQt; z6Vv7vu-KJF z`;OG6IE_q>4R4p);LWY_uP-&XNqEHYuCD9I&$#BJjy8Kc(DrEhLA>mxSI(KI;Kyt( z$sr$j+B~&6SEjLL=9v<%XCB9yNK#HJNi`Ww#*!qdCF_$7$@uzBTgtzhl_04nQ^^&HNgBy?GLvjfHYKykTykY{ zRdRK5O>%8=U2^^Uxy|HXUnxOyQ*v{1OLFV_TegstTO~-Ik~}rJBe^qqTJrSuyKOD0 zyGxKfBY9?WS916I>$Z}V>m^8@lRP(hUh@3p1<4E7ABfaNNPS82(&S~y%h%t8l$(+A zs^rz{Z$lYwM;YFbyfJxG^5*0%$y<|mB=1~*3naM}!5xs`PDt>csPv$C8gHpGZELJdiw?d?xwq`sbj`&qbLZN*+!=mwZ0?Lh{ArOUakl z--cgq$1h(?zMgy|`DXI16$|!{kTFkCUGyKTUp?{5<(Z^2_!2 z;n(}|>#veuC%;L4oBS^Mee#FokIA2sKPP`l{+j$P`Frw@Vi22)0MC1A-k9?1W%v1iK)37XIA=n$iJ_znY)!&Wad5AtA!TtyiKyV*G??-STqKgn5gy3KVhafl< z!5}`z5Q4)HeFB2R5gdWwXavU~I2J(x!D0l*Avhkv5(Fn8I1#~%5WE<{DF{wQa2kTs z5uAbGi3oN?v35dmCZatEwm@OFMDQ|1Uyh&`!7>DW2>KD6jbJ%~6$s8ja4v$C2v#9j zjbIIe0R-nESc_l~!4QIB1m`1Ihu{JP7b3U_!NmwJL2xO8%McV1T#lfGU<5%KK?Ok- z!6{ZY8UxnD!2(CeJ zErRP1T#uj+AG#mG4T#=|;3fn&OOV`?+`IlZ#BN7$A^zD#2<|}iP6W?D@Js}~sQzUL z?n3l#1kXb7Yy@vY@MZ-2BG?bX3lO{z!AS^CMsN=bbuWUYh@OSu9SGiuppIY)!D|q_ z7QyQfydJ?D5WEq=a}Yci!P^kL9lycL zuL$lz@$W_OFGT;1;6Didi{O6<9)tSZ48i6Iwm`5Yf~^p2jbIxDk43O8g6$A&k6;G` z4`Aqd5Wzc8=yxJ`4=CS@-~;&Kg9vuRKi-`=5Il%rPXv1**c-t<2=+&C0D=P%EJAP} z0PaU{2%?7~xCS5eS_Fq9dIW+a5gdi!XavU~I2J(x!D0l*Avhkv5(Fn8I1#}~2u??r3fxV@Blu*g9u89jv#ml z!NUm32r3Aw2u2Z%AxIF^5UfYA0l_$e&ms6cf`{-a9!78lq6WbXf{h3^A(%xlhu}&C zS0T6>!8Hi3MQ|O0>k-_5;6?-wp!qzA;1)!0l^}Ud^4j%JLF}mr?m%!S(*KX6dknK9 zTf+bv+qP}nwlT5Qv7L!Ev2EM7=EinrV%tt`ulci{e$TgS*G5&H)19PqyY-!b@zMlL z#3W3{6imf5%(M)%(8jO>Fk4{`=3@aCVi6W&39jNAmSY80Vih{Oi@IQ~xDM;F0UNOi zb2XWVt>QLp#}4dQIDp;a9(1-0U9eZ&hnl8P3kStRI4mB)VetrJA{LJ01Ww`Qj7q@W-cX1C7@d%Ic1W)k{&+!5;@d~f; z25<2Wtqh|z-isgb5uflGU+@**@YLcw!%y)Sej|wMASi+%I6@#KLLoH5AS}WmJR%?> zB4HFpBQl~ODxx7eVjw1BAvWS5F5)3R5+ETGAu*C5DUu;MQXnN#AuZA&Ju)C8G9fdv zAS<#VJ8~c=a^W@JAUE=yB_ycuN7xhpd4bTvc&=^h76i0Ck&CncQ4ec9RiLL*84d2in z9k5uFCFm%2LT7YAS9C*n^uTQczk{A)FZ4zq^hH1XiGdh|!5D&}fwUgcdSt)|X(YP3 zlDh@cdTZ-#0pqcFP2tf@y7=oK}2rO4vft6T= 
z@5b{3tHm{#?XWpmE3U(OY`{ir!e(s2R&2v|?7&X!3Z(V%)+Yk?O8b!1$&%rqcnF7a z1fjiYhQV?11Ww`{J>BA!fynzchPM}3@r89GK5qJ zh0q9tun33nh=7<5i-pKy6huWdL`Mw7L@dNc9K=OD#76=oL?R?c5+p@3#5L)7NGYa5 zYNSDWWI#q_LS|$^R%AnVb93@a%2W3!N zEQ7=bo&;sZawv}qsEA6aj4G&#YN(DHsEt3+SNHu;PpppyXoyB=j3#J`W@wHUXo)rm zKwGp!dvriYbV6rzL05D`ck~FPb^F#G0(wh*&=>viCkA2=1|z*C$bezu@IYE$Zha*n zqn9$_FRzcmSd7DXOh9NKkHR1-q9HBP;j{%ggXtR2z--LHT+G9KEWko6!dM-Q!%}e> zmSY80Vii_n4UTGj4C}=W*oaN|s_+e4#chb}wvK|G;x43i%ca3yaUUXDh)6gn9>Q^) zz)>7SBV9GdN%0g;;|$K?9O_w;`nZZ~$gBH&xU9()T*Wn9#|^C1colAnx9|`C#ckZd zeLTQpJV7h8#&f*DOT5Bsyun+%!+U(dM|{F(d_fD#)DqvtANYx1_>CY&x>%DXsAzF2 zA*4bmghm*IML2{<1Vlt6L`D=uMKlC;j$nw0NQkWv2XPS(@sR)tkqBumb~+>#lOZ`$ zAh)jaAhnnVX^{@;kpUTz37L@vS&6(G1Pe0xeO{ ztyT!JEpZ&QQ3ya=v_pTF#Q<~^JK;KRppnBGqpR2r-O&S|b^isu#Xjhd0eEPc9$}C; zIFQy0TQ3S2CJo03j6?&cXo$bWG3f8m0hlh%z*GaBhPf7e9@-i}JIvH%7LF<$!(4G5 z=3@aCVi6W&36^3RmSY8O>hu;?i)*kJ>#!ah@X>%iVY9deTd@t>u>(7?3%jugd$AAu zaR3K#2#0Y5M{x{8Olv5Pizjg2GF`w)@f7ZB@&ISWb69IK>u^!Lgv+=RNbA|H=LE!e zKmy$K`Yp7#F&!|!3tKtd!!T7`5-Dkej6q(DlfLTaSJ5RHch(t3I86#<#Olm%Il4cUHSddh#N{ucOkz<2z>PyE7f1PT00>H$F!48ai!p-}-95f0%I0TB@e zQ4tN%Q2+%|2!#;~v5^H?5eIP*5Al%z36ThikpxMR49SrKDUk}Pkp^jz4(X8r8IcK@ z5dtAm1yzv^*^vV|kqfzz2YHbX`4Iy#5ebn|1VvE{#Zdw!Q3hpE4&@OBVNnT{Q3|C4 z=^ijUa4Y+Fnfl)*&b7Dma2t1U7xyq93$PH2@DPvi7*DVfo3I;u@Ek9&7)!7e%kUDf z@EULM6Tk2l@30&za2Z$d9v`q0tFRhta32rw5uflk{)f-_g7w&dulR=V_<_yXg5L-d z_!ln%{_)kn2r33cScJoNg&hbkhCoP!LTH4+PEB^ra-y}WDc}{mudf9 z=2^X*4cU6bB~c1xQ4Zx%0TodRl~Dy%Q4Q5m z12s_#wc&641k^!Y)I)tVKtnV_V>CfiG(&T=KufejYqUWC+M*rWqXRmk6FQ>{x}qDp zqX&AU5Bj1X`eOk8#6S$fU<|=f48w4Yz(|b3X#9mS7>jWjj|rHFNtlc&n2Kqbjv1JV zS(uGEn2ULsj|EtWMOcg_Sc+v>julvmRalKRSc`SIj4RlPP1uYr*otk~jvd&EUD%C1 z*o%GGj{`V}LpY2hxQ-k68~?*`oWMz(!fBkrS)9XpT);(K!g_4LRb0bS9K%iA!aw*I zw{Zt|aS!+L01xp9kMRUg@eI%L0x$6juki+N@ec3t0Uz-RpYa7>@eSYc13&Q#zY!!% zAVCof!4U!>5elIZ24N8n;Sm855ebnI1yKb93@Z^rBE7WP!{D-9u-g#l~5T~P!-is9W_uBwNM9jQ4jUe z01eRyjnM>6(G1Pe0xi)BtcFP2#c`brDUcGWkQ!-_7U_^48ITc~kQrH!71@v-Igk^%kQ;fB7x|DM1yB%$P#8r}6vYtT 
zq+_5sN}#kt8I(milt%?rL?u*3HB?6p)I=?Gwp(3LN34r_sE?@%)36lFuoA0q71z*8 zj0r(RGF$jY(1Vb?l!_n7* z^}`5pBt~I0{=yiH#W;+|1Wd#v+}6PzOctkLDyCsNW?&{}VK(MqF6LoA7GNP3VKJ6q zDVAY5R$wJoVKvrZE!JT@Hee$*VKcU1E4E=fc3>xVVK?@mwrTx={o(-}#33BU5gf%a z{Eh$NI8NXsP9d`Gj)Jq|Ih@A@T*M_@#uZ$}HC)FH+{7*XgMZP)Ae-W@cn|mS01xp9 zkMRUg@eI%L0x$6juki+N@ec3t0Uz-RpYa7>@eSYc13&Q#zY)avf+84#BLqSs6hb2m z!Xg~PBLdo@9U>zNq9PiSTi6tcDaJx<#6eudLqa4%VkALQBtvqfKznpRYNSD0q(gdS zKt^OjW@JHDWJ7l3Ku+XBZsb8!>k@+VXp!0Lmja$>Y+XwpdlKeF>Y&o2hGIhXn~e!h1O_;07OJ2 ze8&%@L@IPdCv-*^bVWCGM-TKwFZ4zq48w5r#{m3^ff$6r7=oc#j|~`skr;*1_zPn& z7UM7;6EG2zFd0)Y71J;sGcXggFdK6)7xOS53$PH2uoz3Q6w9z2E3gu)uo`Qy7VA*i zR#d@8aT7LU3$|h#wqpl&Vi$H}5B6do_TvB!;t&qw2#(?y{>J}s94BxRr*Il)a2Drq z9vASJH_b7)EMCEQaRRQ3H*gcT@DKh)Vz+$~+!gQPw!`k=q4)@o@dQut4A1cbFYyYm z@dj`44)5^+AMpvF@daP;4b3cjbNm#4;WvU9UwefP2rh;|NQ6RpR6uBiK}A$Tctk)% zM8auZok3->3Zf$hVj>n|BM#ys9^xYb5+c!muNi3qBo&h(B~l?Z(jYC;p{IvYFJu%m zAv3ZdE3zRwav&#iAvf|MFY+Nj3ZNhgp)iV|D2kytN}wc4p)|^%EXtv_OW+TL6~iGa zqM<6Pp*m`yCTgKJ{D&n0bx;@eP(P6F0SyA}-(~uLKj>`a<;IwdDd>uBXo99_hURF6 z)>w;mXp44ej}GXJE?A0X7=e-KjvnZVUg(WJ=!<^nj{*1-12G7LF$6;~48zd^EinqC zF&5)69uqJX)36bnFcFh58*?xnGcXgguoJs57xOS53$PH2uoz3w5uLCcE3gu)uo`R7 z1_4-)4cLq=*otk~jve?5W3U^00_h%5E$|O+`gfV}-xk;KCpk4y3$@`lNe9$HUDQK; zG(bZ%LSr;RJG4i0v_MO=LTj`^0NSD{ng!B5;C4#Gnfc#v?goAxa1ZzK01xp9kMRUg z@eI%L0x$6jb5jK}53j{Hc#C%kn>vti_$YqDXMDj|e8YGAz)$?bZv@eEPy|D8gg{7y zLTH3RScF4Kr!0l=Vgy7)Bt%9OL`5`2M-0S7EW}0}#6>)8#ug+*A|yrt^6hToGLvfTqNt8lq zltEdPLwQs{MN~p%R6$i#Lv_?ZP1Hhd{PEvwrdkX2#QJD}hG>MwXo99_hURF2mgtQ> zXoCQ>MLV=d2VBG@bVMg~Mi+ENH!Q_6bVm>LL@%^LYxKte{E2}Wguxhsp%{kY7=e)( zh0*v6V=xxuFdh>y5tA?(Q!o|NFdZ{66SFWIb8s7X5Z)3;z+!O;`ildwTwH<9Vi&9y z*Wf-LU@g|6g(fYrQQU<1NPw-_hV9sao!Eul*n_>;hi1BJj{V{R9K<0U#WDPi|KT`J z;3Q7rG|u2G&fz>R;7{ET#AWdcuHqW5;|6Zx7XHD%c#3DZi+ebXBY22Mc#J3Li+*^C zS9py#c#C&4_>Ld=iC^e$3O$e*Ne~<%5E7vf8evcng)mh=G`hh1iILxQK`NNPvV$grEq9q)3M3NP(0{h15ucv`B~a$bgK|qT*!?)$cuc)j{+Eip(uY+XwpdlKeF`A$$R_b6CMu;QP3a!xw0ceYMXpau)h)#I# 
zJ@f;*irw&Ar$Kzx6TQ$Ieb5*EFbIRO&=HF;OdO7{;@~9j3ro$b(oAP*o-aMi+$LS12~97IE*7Wieva2|HEmV!FgQ3MO;Gg?16;9 zRq-0G;|3n$5uW1(Ug8zr;{!h93%=tAe&QE`rU@h%f+GY%A{0U+48kJ5vhbkQrH!6SwhQ8>B{uqE^7>*GbiN7!g6EO)>F%7dZ z2XiqG^RWO6u?Wkt0;{nG>#+gn4ebJUio38E`*0A4a1_VzH~xo{IEB+VgNwL?%eaES z1Am`)!2fVvyn&mzg@5obZsQK_;{jgc6+Yq71AOdG9nYQA{+9f0E(joN}?1>qYTQT9IB%RYN8fu;}6t9 zT{J*LG(uxELvt(-{N;xME6`c&g0AR>?&yJ@=!M?sgZ>zRKQR!4Fc`xy93wCi<1qnK zF%9#v0E@8%OR)^gu>vcx3ahaO8?g!7u>(7?3%jug`*8pVaR`TT1V?cUf8&2RjuSYG zbGV30xQ-jRiCcJ#CwPiyc!^hdjW>9Ucld};_>3?3jvx4qAn5`LieLzdP>6^~h>jSD ziCBn>c!-Y#NQgv8juc3b49JK~$cbFYjXcPUd^qY-IflYw5fn!Wltd|%Mj4bvIh02Q z)InX;Lwz(rLo`BTv_Sycq8&P;3%a5kx}yhrqYwI`9|mDChF~a0U?fIiG{#~a#$y5| zViG1}3T9&t=3@aCV+odH1y*AX)?yttViPuF3wB}`c4H3?;|Px882-lpa2zLa7Uyst z7jO}ma2Z!{6Sr_55AYC=@EA|<6wmM+FYp%c@E#u!G<_h!5F8;88etF?;ZQVNAjJ?- zjD*ODf~bgwgh+(sNP(=#hTO=5{3w8eD1@RYhT4JD1-8-fQqPugc$=#gxcaC zsE7JUtdIl^#D=JCGJl|n*c8ps94*iiZ4iL==z#9%fu87v-spqA=!ZcVj3F3`5g3V4 z7>&O$24gV}<1qmfF$q&K4bw3LGcgOxu>z~H25Yen8?gy(OgaEt#ckM&eK?3i_#6Mj zMO?yl+`w(z!ClLL@)G3KMcZP48w4Yz)1Xs zF&K++Sm;}dMVKf~!c_b{hoDPS@BRGx|IFAdsh)cMM zTey!0c!)=Mj3;=CXXvW?Zg?%eK~J$4zKcKbr^OzKhzgMq710nKF%T265F2q29|=&? zby^F_#T1Au#zT5B12Q5LvLYK+x~Nwnx0nY7Q3!=m1VvE{rBMbMO(7G;o5BQCRj7vQ zsDVakj3#J`7HEk!2tY@4!dsob!w7LChF~a$VL19@0RF-lOvEHi#uUuP9L&W$EXNA0 z#X79V25iO_Y{fQg#}4eq9vs9W9L5nG#|fOoDcr{cJj5eB#uGfnGrYtrY_y!4@Ll|Y zpguAMLvVyZXoNv{L_kDDLTtoALL@>`B*S?de*vk*G)RkdNRJGdjv3f(Kzoo`%z~`Q zhMdTS{3w8eD1@RYhTCl^+%)i8XeG8rTeL%GbU{~i zLr?TVU-ZKu48|}F#|VtXSd7DXOu$4;!emUrPTR8!OT}eaiB(vOby$xL*o-aMi+$LS z12~COIFAdsj4QZ`YxoEM;yxbWAs*o=p5ZOt;XOX!BR=6PzTrE5;3s||yzPmAh=_!k zh=sU_hxkZ<#7KgqNQRV1g|tYA^vHnB$bziMhJq-B?>6}dii;&s5~WZYWl$MaP!-is z2X#>o_0a$g(Fl#v49(F3EzuV3&>kJo8C}p7-Ov-g&>MZw7ya-j24WBfV;F{G1V&;! 
zCSW2ap`J^sKBkK^FcY&d9}BP)%di|PaMfYguvT1$jo5_U*n|BzfP*-M!#IM!@jo2L z37o_!T*M{Z#4X&$9o)q|+{Xhv#uGfp3%tZDyu~}b#|M1GCw#>>{KPK=_k+9;2#aut zh)9TyIEagQNRAXpiBw39G)RkdNRJH2ifqV@Jc#N$>u4w_7D929Kxvdgc~n3}R6=D` zK~+>kb<{vj)Ix3ifjX#*v_2H2!(s6V+M@$Hq7yo!3wj6se<1;V&|U0-p6K;o6@Abj z1MnvXVhDy}1V&;MM&mDx!9+~LR7}H6%)(sE!&_cAD%7^rRFqi>X^{@ukpnrA3%QX8d65tKQ2>Qe z1VvE{#Zdw!Q3|C|24ztWRZ$JqQ3JK{2kM~if3KPTAIR*2%Yp_94bcdV(F9G=0xi)B zt5)8e?J{W|-7=ob~hT$0D))9n%*8y+#{w+I68xou zF<36Hz*?-sW^BP$Y{O3M!fx!rUhKm`9KvB7!BHH;-}oO+;uKQ4xKiP=cm-E+4cBo4 zEi`F~yW%}O#WTFe2lVwA?uVRy2$KuH6@r*$Bahd{2rGueR7^u+BtdGVK@*LeVm}Tb zqe3Rk#{y(W4&+826ht8uMiCT6F?`q650n+lp*$*}GOFNDlOBkgVlC8jN7Y9iu`U{* zAsV4EnxHBEvLs{BN^Ffb2tZr3Lq~K%S9C*n^gvJaLT~gze+LRqH&rmv+E2n# zEW>2On1YEKPr_tO!A#7;Y|O!0tiyV2z(#DsPVB;N?7@B42a z3u7=DQ!o|NFdZ{68*?xh^DrL^uoTO1+!Rh=v$zGju?PEc0Eck|$8iEDaSEq#24`^& z*Kq^4aR>MD059I!XPZdAtE9nI$|I$;vpdtAu*C5DUu;MQXnnT zAw4o6GqNBnvf&@M{lB>E8_Fxlt&j)#Q2-@T3Z+p71yKm)Q2`ZE3AOPD>Yy$fpdp%| zDVm`r+@FxahA|_!nreG#!VJ_xjF_vH>Hen}rVK4UKAP(U$ zj^HSc;W$pHLvVyZ zctk)%L_$b*#7XHD%xQ#ow zi+i|_2gvA@nebSAf~R-w5Km`e}l{ zu+_=8A+`?UAf_g<5F2q27xB!F`1X$Sh_-cH}@_IeLVFi$2P||+ zEka?7Qv~HTsep>8gvzLbs;Gv{KImmZJ+VHz+T?Ek?X=h$Z4iLAXovRbfR5;d{SG^T zi@1d2*6##{XfhP_oU%Szpe4pCjKf4s!dQiIm@dx1Ow7VUEJ9EhNHDAw*I_+2U^BL0 zE4E>>0ZqX~hfVtLSggZIO-|u7&fuWJA)FU4;HG#BSH)}S?MCc_nBFpCAv<#5wkCIQ z7x!=<5AYC=&{^XysOm9N4eu2`Ah?YWf$N&wKxi=x!Xg~PBLX5L3exH-9b$`dkWIyYb6SYtqf8eaH&Y_9e6wS~aEzlCJ&>DSBx*s})JFF6Ln&7GWutVI@{!J9c0%_F+E`;2;j+Bu?Qp&Y-&C*1%Qq8oqlg`hnZx9o)q| zWOX@aLvfVAONCc>jW-yjFd84lPxy>4_=<1%jvx4mU-*q6MzGL>Y!O0=q3~6w-w<7l zftZMe*ocGpNPwhBhLlK!)JTK0NQaEbgv`i-?8t$_D1yW;q$DUSmP2>32dawIPzQC< z3av5Mr92NE#ZKsqF39M@%!K~p0Q`x8_^$8+^(;w!%vP9#xtNFfSb&9CgvD5brC5d$ zZk&;b?x7z8iID_R4KfKobv?rZ_Af!BHH;ah$+OoWg0G!C9Qc zQzw6h%iDDPzL2u0aZ~A)lmbHElw1qbTOnt1BHfYgvS51^EixxvYM2`XP4s_ z^b~vHqYpNpuuxot{yG?dAsC9!2Kfc!#RNhN~K1!$Um6Nli}SGOpk%uHhg2 zi`%$^ySRs!c!k`$&x7~k2YkjC{KPN(MiA>96u}T2ArKOw5E@|+7U3}8hw=p|E0#k{ zla7Vo3PH>$u0p*3j!;;Dwl1@FNU4wt&D;jfky*@woXCZ|$cOwWfZ`~Dk|>4psDPTN 
zh1&Q7bx;>a4dWP^iOn(JYAirZw{9%7RcMFy=zz}Xg6A5)KzFeR`l25OVK9255B~Ci zAA?ciXiUX4jKw&lw{#gWU7UfLn1%URfW=sXrC5fQScTPCgOW~J3LC{u7$uIz5GNXn zt~R9`_G+>Zf8&2RjWallb2yI+xQI(AY+;LFr5k${t}EQYZQQ|Iyu*8Zz)Q>i3ZKO< z_=<1%jUdLnUZ)!nQVfN#2#3%vq%epqMnP0WLv+MIOvJ)n!@Y;l-a5h{xk3u$MLzta z@Gr`WV7n9TKohYk+8_Y!(E%ON3BAz=RZXiJhKNHkL>!7% zwy`y4E6l-M%)@*vz(OoSNRtVLjp8Qs75ibkxC8AyXggrHxCiew`G64$BXL;a2-3Td zGT?9Ve^{-_8f?WjoK`r4i@1dAxPhCvg~xb;*LZ`^_<|wcCWhj-7{s*^6u}T2ArKOw z5E@|+7U2*XQ4k$5kPwNG94U|*X)xZ~$pp;x)SHLg3VDzh`H&w4&>cNc7)4Ol#ZV5> zZC(siRH%fWVlUJbYoRv&z+Y~cG5D^@542KfjW!5CPHzdhP#8s!+v6(_`fD-(LogJ3 zHQ9$z;%JP;IQ+DEzc5*xf|Ndcr^0M;4(4JWw!1`kV6nIatL@ertQObcFCC1*W^oIm znpQOI6n7z~iz^pSi)U~a=Wreuklx~Cz-93Y2DwxQa#J zK>$LULMR;7wQKOwVXyEJpYU3fH(2OhZ4o|; zU+@#Z@EbuK5fs4?93c=M5fB+s5Eaqzk4yev#1-QqJ`y0Z8!-yH+LUfM?{>a`jGAOZ zW@N#8jX&V7;od`Gjf_A(w9onM$rxP56$qXbTR z(>#U5mNN;;DwIQBF&}D*wNMZB(Fl#v1WhqjSJTi+Y>mF|qkiZpc0y-#K~MBTU-UzN z48Wflh@{>OlVQ59W?+QENQ}iejK>5_#uQA&G%Um-M7Fh2aMlC#9A;X&Sy-*f8f?TS zY{w21HH>07E}p)dt*FxP$&qGywO-2Y84_c#ao%jW>9Ucld};_=;~R z>S10CKgC}N>Om0DbVk|s2$QOt!#zTB0Ktd$??=@SI z0LjG^NQqSV$MpY2dNBiXA{T;NrVz+47C?2wsDa{Q36w-BR754L*JJ}KyZx%5tH)e7 z^wp#v24OIUVK_!%G7s87XFdZ|n+oik*S;cI~Z;1Ii z2#@gu&+!6ZHTi~NwstsnT8Lc;tq=xb5e}nl<7h+`qah|@At!PnArc`mk{~@Yppivv zjE?ApkKPDAA-^UC5ZQ^MV7&otKxu_CD2sBajv7el@t+8F#JZ@5`e=ZL*zYDefX_Dh z3tqYnUZJxlT`<=JU>nq&l)`9?#W;+|1ROQhV;JVb9FFX6 z*c@1>$s#Pq5-i6Gti&p;#|9j?c_$Fsa)!Z9gGJz z&I?>rxP;5Nf~&ZOfABA6n*J<26d&O+p5QrN;4R+aJANRjO%8_BmgEe=ixChN(a_Am znks;GwQ zsDXN@k49*Wm6l``T8J&t7VXd;9kACexewjN9{9_}Fb2KFKDa7g!!U99f9)^^<1qmr zE$k;u6{lf3W}tyhZix1V)&Vyy$t~p%Dp@ z5d~2Z4bc$;F%b)~kpKyi1WAzsDUk}Pkp^jz37L@%*^vi%kq`M%48>6jrBMl$Q3X|T z!?NGRA7UNUMLpC<12jYcFP0xPiwYq1XNu?1Uk00(ghhj9c)@jo2LDV)X` zoW(U<#|_-XzqpOj_zPJ~AuAq5elIZ24N8a z5fKTI5e?B13$YO@@R<=18VSUNNQUG{h15ucw8(&r$cF4V=GOQddBnUZgu*C>;wXpm zsDO&7gvzLbs;GwQsD;|7i+ZS!255*zXo_ZNiB@QhHfV?T=!Wj-fu1;KiT}nxaS#S$ z2!>+>M&mDx!+1=>WK6+ST(S_CF+-e*MOcg#Sc!F5j}6#}E!c`}*p408iCx%@eb|o! 
zIEX1OuBrH+cpRs28fS18H*gd8a32rw5RdQ}Pw*7a@EkAj5^wMpU+@(_@DsrdBRE1K zG{PV(wpg64h$Kcv6huWV#6~>C#}4d7A|%EHTQ(7C#I#6<^vHmW$cF65h1|%4yvT?A zD1^c&fs!bN(kO@WsDO&7gvzLbs;GwQsDYaJ19i|CP0$q0&>SrgfVOCd_UM3)=!DMb zg0AR>?&yWy7=S-95Q8urBQP3&VGPD%9L8f3CL@D~LPkszr(*_YVism&4(4JW=3@aC zVi6W&36^37R$>)aV;$CG12$qCwqqA|V5v}DoH98wiJ6fP`B4akQ4GaV3Z+p7Wl;&0Q3X{|12s_#wNV%K(8;aa8C}p7 zz0ey2@F)H?{o5ES4#N-pL@-+s9Am_>n1ZQjs?ZEA(F!XRR$>j-ViPuF2X+G%nyGrfD)A*Tn0%ftxs_a2R*Qy9i-ZLZTRoKq1fVV2p*_}U zvKC#$t~e(CjecT(48WflieWgX$$4C|B$qKpVJv20Hs)Y1=3zcAIP4;xIP58wDJ(}k z#K$VE#s+M}EL%Gpd&Irihy9qN)4A9s?#2n6#5tVD1zf}>T*eh##S$z<4b;Q~Jj63R z#|yl~2dDgqZ{m0Sz)v(*Xo7GEk0^+WH>UpurF47YUp57HP$T8vQ`iwT&BX_`#OCTvDLh4`3< z`B;F3Sc0Wkh1J-E&Devz*rBVPNMZ^}@k#uQOPXB9HC)FX+{G)r#sh~v#5?glf|yKD zd=fw7l}=wHjzV07b69ws(BvediP7=N5`RVht#xjR3M<#{L2!f!t!6hldrLPbcP z#|2!*71TjpT*nRE#4WtX2i(U4JjN3|#|yl~E4;-!e8eYw#}E9(FZ@Oj*V{4b@Qt^-v!T z&=5_~6wS~aEzlCJ&>C$JfcEHs(k5L7-Nhd0iC*Z9KIn^n7=ob~j|rHKIhc>=-@gGmf-I037X&z!QI{6-4onG0^I#fa^8E+ckZ{={pZ>9>#DA< zuCA``Jxo?!;uT)w4c3`S>oFe-un>z-*F9MeTg0u{hVAIBpbvJ5yD?XshkfFH9KvB7 z!BHH;ah$+OoWg0G!C73wWn95kT*GzT!fkwQz>46Wcpne&5W^LWz*vmKGX>A_3a{}e z-r+qy;3L90qv{5%1|o}5kQ^zH5~=Vx@*pkJAtN#&GqNBnvLOdP#g{0Aq9}$JdhinE z#PXYyaV;L;A)eqV&g;en+z@Z#4({R&-XgrFA|NJWAqX`Q z9Wf9W@em&gkQ^zH5~+|D>5v{75Lq{(Agh=S+3^W-A{VNvQVqq$5-5pMD2*~Gi*K+| zf*F{J49JL@sD;|7gT`orhG>MQ2u3Tk zMjNz6L|u!7USe;=H;4()PwbCD7>q^=8e@bw5~DC09TkLNoH!m6FcGH|oW>M!Dz=DQ z(OWnAV3vZ}C?FO@VSI%Z3RYqjR%1OjAOxMT72B{AyKqDUN3mZ#fP*-MKNY+~bTI}l z;u0?73a;WBZsHam;t{Un2A<*>p5q1X<3WJVt>&lo`;6&fzt1?9A+Y&476-PrT7som zhUHj+l~{!%=>r_a8gVVwVLdirBbKKNuman}?bv~x*oEELgT2^?{WyffID(@%hT}Ma zlQ@ObID@k|hx53Ai@1c#xPq&Q?i6C+jMG1h8wzgX7H;DX?&2Qq;{hJx5q`(9OaYGL z5AiXc;3=NrIbPr;Ug0&~;4Rh|u(kM0{2TA_0f#aMIE?VBL_kDDLS#h225dxh#6V2M zLTtoAT*SjQr+*y@#Y9MqBuI*6NRAXpiBw37bV!d3*n+Ldj4a5CY{-sJkOQA0CvxF} zi*ga4i+S(`@**GdBc{QQg#uzhTobROu=o{MwXo9A= zlrg|%oYCl61S@EU=4gSIXoc2jgSKdg_UM3)2tj9bL05D`cl>}K(F6Ak`+f8l`=BrS z;fYgtih<%F48{-)#V`!V2#mxi)X?a+7%h&$Sd7DXOu$4;!emUrPxu+rF#|I(3$rl? 
zzu;F~z(wrCe$2-LEW{!tH`pm~MZAhlX#;G=YVkMxDZaxSr|=e06+{c$1DlZ%nXnDp zu>(7?3%jugd-1u0=fMH-AP(U$UMP5pW8!gaaqz7;A)dr3oW>cP#W|eET+G8IT*gu? z!!=yT9o)q|+{Xhv#3THUKkyh&@D$JR9Ix;iZ}1j>;vN3N-(f#Ft-tY643|DYctk)% zL_$6bB~c2cQ3hr44a%WBDxe}Np)#tVDypG6YT#Q$v=l@_EwMK0;5*bsJ=DkdXn=-j zgvMxsrU*teG)D_ubJJZ%OR*JNqYc`k9onMMVA_4ctUmbVGOifFIEV zJ<$uj(Fc9e5B)I!12G7LF$6;~48t)3BQXl2F$PiH#L=+JrQMD33MODOreG?j;V1lz zxQ0C*W{R^=R|EC1607iwf?qKg^DrL^un>!|7)!7e%di|PuoA1V8oyx;)?yvjV*@r~ z6EFQJz(<7BtMG_`AlqspvKR$X5e?B11F;bY(^Z~<_+kPiL?R?c5+p@3{D>ad zWU_8XN--5m8`v^PE2cwwWI#q_LS|$^R%AnVe1aVK6giO#x$zmkKwjiSeiT4Ke2GFR zjIZztukkgCpeTx=I7*--N})8$pe(+@U-%pGO^XDmBvwWhR7EvZM-6<7Ak;)H)J7eA zhq|bT`uH9V&=8H#7){U=!Dxo&Xn~e!g`zG%F|-xip*=dFBSO##q3Db*=!$OWjvsKw z&3P6*#a`%*KIn^n=#K#yh(Q>PAsC8b7>*GbiBTAhF(_b46~uUP0w!V-CSwYwVj6zJ z&zO!Gn2A}KjXC%QzhW-tVIdY_F_vH{mSH(oU?o;zHGabyti?L4#~Z`+78}Hk*o4j4 zg00ww?bv~x*oEELgT2^?{WySwIE2GEf}>by!CMrz>WL?D3a7C}!B(6T&*K6vVx6h7 z9#_Px$RU1;8{$pe!fkv+IA?jsPj_(-_fbhfWjqic;t_twA9#%ZZlM8qCO*dtyu?%m z)9^-oi$C!W@9_aQRJn=p4jBP+b$lKoijfc*Q4kf;5FIfP6R{8*aS#{r5FZH;q|ut# zV2WMXZWy zsE!)=7D1?qTBwb>sE7La9u3eCjnEjuXoi+(h1O_;wrGd;=zxw0K_`TwGrFKFx}iIM zz>nyGp6G?%=!3rKhyECVff$5iIF7*>f}t3O;TVCD7=_UogRvNg@tA;#n1uE&PX|mD zr{O33jOmzxnV5yyn1f&NE9PPzYPnFgF<)GOg;<2eSc0WkhUHj+l~{$<_zi2Y7VEGc z8?X_Zuo)Ab!X&K5Z`h$=Cw5^s_FymeVLuMwAg*|cxr!s=QS@{&y>LQ2iBmX@(GD~Q z=fv~4fQyK%sVKN2Ud1(B#|_-XE!@T(+{HcI#{)dXBm9m(@EA|<6wmM+N8H#)@k)G+ z3YNKw_)~m`_o{rrd+`Gctk)%L_%al!2yqigNQE1Kqn_1ir8Wt#6>*B zM*<{7A|ypJBu5IQL@K048cfC%q(wTUM+OXaDTX1lm<3sp4cYMta^O?sL@wmUXZRd> z@CEWBAM#_YK^%uK#X=~IuTT`lP#h&t5~WZYWl$F1pd8Ah0xF^sDx(Ujq8h4WqJf=+ z&z))>)KTyq>Y^U%<9jqfLo`BTG(l4YqZyi`1qNz#5L%0E&=&2`9vv`NH^!l(7=lg+ zMOSn~cl>}K(F1kNuB@bdE7Gf)igSd!?_(*_+ zNQA^lf}}`>?w@6fzMsEjJ8ifX8i8u%6obvF@eiM5eZ zOod~Xo8zdj;CmERPz;U4#%O}32u3qBM+>w>E3`%%v_(5i*3=Yq6hqJnq3Db*=!$OW zjvw&ENk2tTu@`!y5Bj1X`eOj%I+=LbCT>SY4P?S_aRj~=i(src4&yNaEfuuFWN`|n zVj6zJ&zO!Gn2A}KjXC%Qzapm&<-&Y%0TyBrZn;Trkb=1JO2trNNLT%K+cc_bcsE_Z_01eRyjnM>65sYSNjuvQ%R%nejXp44ej}GXF 
z5OhK)I-?7^q8qy72jn!wxzJPWh2H3czUYVk7=VEoguxhsp%{kY7=e)(h0z#;u^5N( zn1G3xgvpqKshEbJ@H3`k24-Upe!;Jpi+Pxj1z3nhSd1lDie>1SDnJNUimR|1zoC2$ z0sh91@dNb0IR)o&0T*!zmvIGGaShjT12=ICw{Zt|(K=p$Hn=C=$52blFgy}}#~*l% zvZmBGcqTr_3%tZDyv7^6#h-YGzwkHS;{!e-Tz?FcOPNkQB*~94U|zsgN3JkT!XMbVw_vLwaPuXcK7+GK*P|71@v- zpCAW5MQ(hC&yfcs4fZJH6Z4}03gSx?LScM`uTca=Q4GaV0wqxjrBMcD@eRtMJSw0f zDxor}p*m{dTLhsdYN0mj;5*bsJ=DkdXn=-jgvMxsrU*teG)D`xL@TsL8?;3`v_}VY zL)JFs^+I*wm1jB;8)DWJj}-eEW{!##u6;WGAzdmti&p; z#&1}IwOEJs*no}Lgw5E3t=NX`*nyqch27YLz1WBSIDmsF?q({1BjQmU!*QIzNu0uI zoWWTSdhj@hF@dqB` z37+B^p5p~xqNi^3!W;1|{=_@{g}?D0AMg?3;sgkf2#APCh>R$RifD+A7+7NoT8r3X z9K=TgBt#-4MiL}NG9*U|q(myjHud6QgGM)^l1o+@8CA)I%*cYQ$cF6r1Uc|2av~SD zX=*z@7xUl? zXpau)h!Au_C_1AHx}qDp;|KhR9_Wc)=#4(;i+<>j0T_ru7>pqpieVUz5g3V47>zNQ zYRXT;cyR(IViK||_ym*1DVU0B_z6E_I%Z%dW??qw;1~Rgwtny44s*qMn2!Zmh(%b8 zB{*X09mP^{8J1%OR$>+6x{UF#MqG<^SdR_Zh)vjxE!c|LPH+x(h&! zcH@9}5QlIWzi8lB921XYt~d{Cyf>}IDFvr-4(D+J7jX%faRpa#4cBo4H*pKMu~_p< za8JCC2Y84__#J=XF`nQlp5ZxO;3Zz+HQwMY{=_@{g}?D0AMg?3^dLMUAR;0mGNK?V zqG2UgAtquWHsT;I;^B;M;Ae3OmyuXO5+p@3q(myDMjE6=I(%W+^CF{|37L@vS&4JD1#>s z{uJfJ@~D7{i0&yA169PTh%3fJd)?@O8VbHe5Ne_pYU7-fK99O$J=DkdXn=-jgvMxs zrU*teG)D`xL@TsL8?;3`v_}VYLcS@yIcGdyi(;gdMoIIzUYUM z7U@wKC=SA448c$g!*GniNQ}a0jKNrp!+1={F&yi8ggv`i-tjLD!_yjrdDRLqga^o|6jy(7Rd65tKQ2+(; zB?_T1zQWfif}$vf;wXWVD237}gR=Mrva@jK>5_#3W3{6imf5{DhyeD`|k; zm@dx1Ow7V;%)u}C6>~8U^RWP@+(M_ZP+Ww?Sb~@aHWrqPD-c_ZgVo}1ScA3LfQ{IM z&Desi*oN)cft}ce-PnV@*oXZ%fP*-M!#IMYIELfMoH{@joDxst49+6H?k2zm@ggqa zG6uV`hhUfKvl}-Q+{7*1#vR;6LXXNsxF_DnBm9m(@EA|<6n|;-Z#)y9;{{&g6<*^F z-r`TZ!(aFt@9_a25zg;L!y^JBA`&7a3Zfz!q9X=kA{JsJ4hnmSe1*7TJj6!=Bt#-4 zMiL}NG9*U|W8l6~v0DgvvOjsne(?R!0qd zi}oG~9S|heL@m@t9ejtnsE7La9u3eCjnEiP&=kRFhURF28vg0+TeK8gp*7l|E!v?y zI-ny$&>3BD#mQVncku@_NfDqadWyZ!8-36h{m|YpbwGb{00v?Z24e_@Vi<;F1V&;M zMq>=}xuE$mUYvl5n1sogf~lB>pYSuLV+Lko7Jk96n2ULsj|EtWMOcg_Sc+v>julvm zRalMRum)?f4(qW28?gzSu?1Uk&k*0oHgP+4U?+BAH}+sJ_F+E`;2;j+Fpl6Tj^Q{? 
z;3Q7rG|u2G&fz>R;36*JGOpk%uHiav;3jV2Htygq?%_Tj;2}!8cxCX1_!v*{6y+=) z7YJoo~6 zkq`M%O{3NErC11s@fE&C5v2F3n*qhe5-5pMD2*~Gi*Ha4Gd_j3Id7;14le9Dy&qOytFAaSX;{96BrL zg5el}$qJ@mrZ@{fi9cgHW`vbu7G`4(TACKE@Qe5>=3*WqTh^oC7Yp~V_|iWk6vARv zmS8ECVL4V{C01cIe#089#X77jSDi+EV7L(8xdtB~9TNr7|Xc_dUh5fUQ_k|G(BBLz|-6s| zI@Mk%U@~2o2CVTdU4~?tY!s1uxj{z8nK^Tl7=!$L_iBTAhF&K++7>@~CY*dlJl zHf+ZZ?8GkY#vbg&KJ3Q<9K<0U#!M$Y3&+IcIDwNmh0{2P^SFSExP;3nu4^T5O}vg9 zxQSc1jXSuDv6j4XcpyH+Bm9m(@EA|<6wmM+FYpqt@EWBZybS&n-{CL(jraI~j|k^4 z?7|}gA|etZBMPD-8e$?AVj~XXA|CEo#P1@$m;ecp2#JvdNs$c6kpd}^3aOC>X^{@; zkpUTz37L@v3th5B$S!_@9QYJDkqf!;89qlIe1W{khx{mjg7^}JP#9m~YZO6I6hm>8 zKuMHBX_P@(e1mc*j|!-WN~nw~sEWZBfFY1WMLYcIBELgNF$A3uiq7bQuIPsD_yIqn2YR9xdZQ2e zq96KW00v?Z24e_@Vi<;F1V&;MMq>=dVjRX}0w!V-CSwXN8SKmWN&FepF#|I(3$rl? zk^NdR3g(LQ&|e&Y`Qid>GBGz}vA6_Fu?)+x0xPi!tMMDwU@g{RJvLw?HeoZiU@Nv^ zJ9c0vc40U6U@!JzKMvp^4&gA4;3$saI8NXs&fqN0;XE$jA}--FuHY)J;W}>MCT`(2 z?%*!&;XWSVAs*p({DH@Kf~RxD>90ikQrH! z71@v-pCAW5MQ(hC&yfdTATRPEKMJ5AzC5Zny7`^sDtlN7xhpd-=hHr+Fc5<<7(*}=!!R5pFcPCM8e_1{ zpC)X_L~#=SFj*gCsyGcl;b%<849vtV%*Gu2f?tu=m+o{(>nmkCEL5-vxfOhdrQ$Ly z#|l(XP!X%e->?R2u@3980UNOio3RC3u?^d?13R$`yRip*u@C!k00%MNuc9X4hTK#v8oFpLmDA@HgJ$14_F*Wf0yi837Ry33V0JLsT&uq9X=kA{JsJ4pyuD z8{&%zkPwNG7)g*6$&ef=kP@ko8flOg>5v{7kP(@X8Cj4O+3=%Z5A;9|@l)hPF672% z*rv*MoWWTXP*4zGq7Vw>D}0S2D2iezjuI$|QYeiwNa_oFGL#d`qXH_T5-Ot#s-haI zqXxc35Ne_pYNHOmLtWHEEfcdgrurq)G&FMX#%Q8SQv{T3xR z;36*JGOpk%uHiav;3jV2HvaOp;BPz-AL0>y#~*l%CwPiyc#bocxwCj7zQilM#v8mv zdoM8^@VS%DgZBzP;3LAh1HvN$A|euso4_RyRg8w{h=G`hh1iILxQK^8z1qJ+d@%tM zA`ucJ36dfik|PCDA{A024bmbV!dr$TAfuQGnUMuqkqz1L33A|5Q9BjWHODaTt#Yn21T3 zj47CkY4{00V>)JFCT3wa=HM6nin*AFg;<2eSc0WkhUHj+l~{$<_zi2Y7AO4C!%3_Y z*JA@VViPuF3$|h#wqpl&;+m!9I`)Wru@C!k00(ghhj9c)aSX?C0w-|_r*Q^naSrEk z0T*!zmvIGGaShjT12=ICw{Zt|aS!+L01xp9zvB-)#uGfnGd#x&yu_at#&>ulz75-k zclZl`Bc+FHD!dmzpn==IA;KHF2#APCh>R$RifD+A7>J2jh>bXii+G5S1W1TPNQ@*% zieyNR6iA6wNR2c|i*!hj49JK~$c!w=ifqV^PmlwjA}4YoH$KDX$b;U#)bzm@VqWA! 
zeiT4Ke2GFRjIZ!Dil8Wpp*TvQBub$)%AhR1K{=F11yn>OR7Mq4MKx5%DUYYq2oh_e z7HXpozC&HqLw$UY255*zXpAOkieNNDbF@H9v_fmNL0hy#dvriYgrE~b5!-Z(gRWvX zymE`b#*bnT^h7W8Mj!M=KlH}{48$M|#t;m}Fbu~CjKnC6#u$vnIE=>xOvEHi#uQA& zH2j30FɀSFWIbMOm(#azt8d@Mj$w^cVR7MEZtmSH(oU?o;zHGabyti?L4#|CV~ zCTzwQY{fQg#}4eoF6_o0?8QFp#{nF~AsogL9K|sl#|fOoDV)X`oW(hu#|2!(C0xc8 zTt!KL;7|%T#GAN<+sJKNe1<#XU8FV9)8T>m5RdRX{=j1#@J0I|+G?O3o+)^a7kG(R zc#SuBi}!Z>fcN4Dd_*{JbKwyI5fKSPy?zWsR52Q&BL-q37BcI27Q_|fAwCiyArc`m zk{~IPAvsbYB~l?Z(%^|kpCY}O0U41AnUMuqkqz1L33A|58Mj4dFHz593#-*FE0jQv^WN1 zF%IJ~0TVF^lQ9KTF%3WAXH3To%)~6r#vJ^DUojW+FdqxB5R0%FORyBnupBGU)-NU6 zVYT=h)?h8xVLdirBQ{|(wqPr^VLNtUC$4GqI`)Wru@C#v+as$F4u}U)*1&#)BjQmM z^%_+SC&ZJeB38v2@hr~aJTBlOI;j$huIPqq3a;Y@ZsHbh;|}iPjDJ-mnh)o93NOT$c!k$UsUQ^^S<)KgfCcm*-mCHf9}&*DghvEKL?onEISuA( zY5}4uh=%BhftZMeFB~W@;)?MQ9|@2UiI5mckQB-AwO;`jK}su0A|28r12Q5L zG9wGJA{%nxQ{+T0fQqPu%BX^>sD|pOfo~Cnny7`^sDtlN7xhpd-=hHzL)i*cBUNtlc&xTk^p*kUSg#dHNTFcY&d8*}gre#KnO!+b2jLM*~! zEWuJN!*Z;^O02?a{Dw7Hi*;C!4cLfH*o-aMif!1A9oUIo*o{5di+$LS12~97IE*7W zieosA6F7-eIE~~UEGckKJdX>wh)cMPE4YelxQ-jRiCeghJGhH`xQ_>Th)4Jxf8a5m z;3*Dy%pS%I@g;(eQ|Xh=_#9h=N*P4QeC07y~g8 z3$YOgaS;#kkpKyi2#JvdNs$c6kpd}^3aOC>_q?CpM|v>>G9nXVTdCt9tC$Vh@d1WMLV=d2XsUTa#}QUp|jWpUC|BQ@dJKD5A;MY^hO`_ML+b%01U(+48{-yp(ci7 z1V&;MMq>=dVjRX}0w!V-CSwW?;2?g&&zO!Gn2A}KjXC%QzhW-tVLldMAr@gVmS8EC zVL4V{C01cIe#089#X79V25iJ8Y{nLB#Wrlm4(!A(?8YAK#XjuEIj=0|aY#IjBRGm< zIF1uIiHN?MM8X;IEY9IPF5n_A;WDn^Dz4!=Zr~;vN3N-*}G?_=s@EB|IV^A|fF&q97`wAv$8*BM*<{7A|yrc6=N}?1>qYTR88kLSC9Nz(h>KWK6+SOv6w38PhQXGcgOZF$cfkSIosc#5OJBV4=7O z>#!b+u>?!849l?sE3pcz@f+58_^wA>gB=f@y%KamYB3Eqsk|9muobx#e1;w3PVB;N z?7?1?RplGBL@OLpa2WHwJ}tm8@i=C>+_P{>JdHCr8}^e1&fKAwCiyArc`mk{~IPAvsbY zB~l?R(jh%EAR{v2gz0kE_avQnD&+s|&;0xqMKIBIM6vUS(gwIX> zJYlP&SOi5;48>6bB~c0mR4$0J;x~Baw;9h-L9B?3zMf@56|pJ`hy_tY{1!o|iCU=JvyKxLeL4Jc%_rC(N*k*?)U*e zq6gA?yr)A%mm(7SD(Hv)7=UuBl*eFk2!>)9hGPUqViZPW48~#{e$v3tm?%!dWK6+S zOv6w38PhQXN9=YKv&A|11;1i0=3zb-AgeD?*|1n#f~8o7xEp)07yGau2XGLFa2T^qxH&i`9>)os#3`J{8JxvAoW})R#3fwD 
z6L@&ihb!L_kDDLS#fiR768`#6V2MLTtoAT*O0sBtSwWLSiIAQY1riq(Dlf zLTaQzTBJjIWI#q_LS|&a0n6|~)NrcbB8P%c5p0N?A-DJ${!;mGd?DsVKIBIM6huqA zwL(4*!~FPKK@k*1F%(A$ltd|%Mj4dFHzCfi1fv<6qXk-`63 z#UJn^dY~tIp*Q-VFZ!WB24EltVK9aup#e*T;o=C4#3+o$7>va@OvEHi#uT*n>xMSy zgiuUZFat9&3$rl?zu;HQ#XQW%0xZNLEXEQn#WF0%3arE`tj2FxgSA+P_1J)o*o4j4 zg00ww?bv~x*oEEbZ*T@+pST|fa1hVj_RsMOuW?Mlah$+OoWg0G!C9Qcd0fCnT*75s z!Bt$tb=<&B+`?^~vV@<;J@Gys;2|F2cl?3Jc!H;RhUa*Jmw1KOc!Rh26Ymh;qc;KG ziy!b2;fzptL_kDDLS#fiR768`#6V2M!hCOs3lLX~hw+$z#7KgqNQUG{fs{yv)JTK0 zNQaEbgv`i-tjLD!_yjrdDRLqga^o`;wzhwTFT}jahx{n5pbWkg3!yN+!q+H*q9}&q zD1p)_gR=MrVSB9%+pB)3py;8&Z96mw8;@0CdqfKC(J5qgSgqf}YBdRTsB>t`Hl1>{ zYaeJKM&r(H+60F-Xw$M&XpE3GfhvKMUK>__UBGND5(I^7k+5gT`mm6=ev2gaTO_UD zB3bI(2B#vR$`ajhZ(7r@Kwsx9!lVNoa!(jY1kV4{6k)MU0SbfqH=>Y!7_&4A~Lz zKb_tg7XJ4UbjA^O1wQ}Ju7?CS3tZlIO@d>D><$zM8r~E5=ozv%;C~w4Cj`Z46qsJ^ zL)tZJ8yplaC`w?@&LJ&Z#4$L5A^PXA$?FG2YuPR|I8dw2kj^dAx6)0=-tRM2?ZE#} zDO}a=H7W*GuT&^q*hji@syA!YBsg7Y`*a<;hqh?nF62O<_Q1X&2Lm5HLkay;OF&iF)F z_}@eMUl;47@>c&|ic?|xpZ@RrpZUM+?@)&0Y~ZuizXN_Q@N>_Q^I?1ZJ6A3U|6{g>TmiF!1bRQZNsd=0Bjfei9 zGc)SH%V7PGdx4gMqO@q)G_c?d4T|;e`qHdrU}dTwa$jMzwvD<4mZZ=Y6(X3%&N48V zfq578-;Rbn2yFUiD#rY8i%Q7Du+70OGPL@ilk8Dod(V*H1O8`{{Sg-a_c;C6Bzqhv z|F1drBy9Jm|9$so|Cikz$%TI&`24Rq_9F0e&ybg4d;B|^uY~_G#}a02*{*5JCc&L@ zHfq;2XK16wf#LZ7JKOZnL0|h!RXgx%7p~ktjin1TmhPX%Lf!;Q1M}`};G<{Ap8?Ig z|4ZF>T1wwa&;P$?U|<#acZ`kpKjR*FkOa;lxZ8g{@cs%r(%+hj6L?@mYt*LAKaZ&Q zVFe$;3d$BgwQyg-8kh10Ds&1C4Q<)3d8d$%VHLs^HAsn@v}xa{b%Wrxjas&85LlZ- zf;)8zJdMH!3Wo%x4m?`hHfYwqeQ3M(p}`IQvn(~}6j-@~Lm~t!4GoIXqEVZ`Qzy7f zaGQ{bfh`px2PNnf+$6M9=f+|4wn4XM?L*pzL<*}MFF5ebZQ7tyX!pPcXb~J78WK5d zd#nz@9U3%l)G4?@=a4obQ3Bh7(zXZGY?2X3SoA#nm*f^yW$maSCj5@F#z7H-+jR*^5C|t^9EvZTSrhx$oNuqA_&Y{h67R=eHW%H1v zf!sMLUfB8&co7PmZsX1^+cXVH7TDf1C{AESXb`xV|Ex=$LXwAVjoYAI%g_!jo3w7w XvU$7qfdNVp*!<6{RZ7h@&h`HQdz%3c diff --git a/doc/doctrees/index.doctree b/doc/doctrees/index.doctree deleted file mode 100644 index 063112f0bb0a6c172a253f39970fc7cfaad61000..0000000000000000000000000000000000000000 GIT binary 
patch literal 0 HcmV?d00001 literal 7785 zcmds6d3YUHb$4uQd$MFnjzfs;Wa1>Yl30>0vKX-%hXkWoNo?Ol0bZWxy}8mIY2M7- zb7!;=55z!-#U27-4@>DrY3WK^N;lfljh60oqkEwn-RMGF3jLisGtW6w}B#hXS1t}rpWcP16XCas&qPoPZ{);u|9bYG?`VlZ~e$XZhxdqxXo4nt9rlaZQ*>KE4p&d>#O<()$68Wd*FvsADAA3*uwYvAgu0f zC_nH3mL9F@8%HAUcIjy^CeIsilYQ|9- zdKG;ul)Np|x0fz0^#U$Ai9X7zJ+n|vPl=sL)JRCmv1M}OSdxC0+ph3TsqeEJCRTp9 zGVXd_o<9X+ML!3VJ~z|PD<{3J0Hdd%(9h>NkHMQ^+N?{Go_@i}FK$!AqRYmHJ}xR2 zHxq@ys=i}QY@ZLJx_;pr;|BC{7_UbJAfU-d1l(EGcZqK0dmg;@qBXI@4;w*>O!7JB zPE_@aMQ_tx@noDRJw3gZp&tS47CxEQ3k?U?XOq606+Ow|h^{~`$zXbGu0oj^a55YU zO>8VpUm>=0u@F5E96L30V&*PAgNa&@-Luh8$Vhh{Fsq8b7nt3b=~FponDumLv<$03 zj+8U%$m|*t&P|gDaqBeP@L=Nj@Fyh`b3o8(MqV07=U|b7(0&yhqC zO*#8#qLP90!$S$Pb#4D4Ck~`*q|;EcvFNO#lsG1q0kUS^Ry2S$9=iZkAS&*yR6N;;$mJ5GUos;`*sQrIO2R0SHIulM=$B4k zCVE&o&g++Dx;m=5#kFODvD%Kcs_zSSty)j86X-Vq&4Y|h|)KwM(g^>LJhLQFW)eD(kETg(^V_5Mj z8DzRy4i`YGJ&a$tE^Bq99&%9~bga5v2CSz9OphvFKPc0NWAOs_&2#j}vLbz{An^JU=G0v}#j&h)tw zhC(kDX5S@yd<_wEPwQ2;`s7^s7qw4;S&;W#8*pv*_3Sx1t~2Ty%{K zNkzXF@*m6e1%|%|@E=RDaU-MGSn%sQ1rMw9Trge_Oxwxd0N~!3={GTOYXI)Cyz~_) z@KpNEoeOl1Q9bfjEBY-^59l6?^AHnf@jQWjjE*19eq@m~Hsx zPS#=d6x;AEu%Si!txY5Ehpb-|e4!Zdk7W8gST?)z9F|dc^_>1Fi~r6` zivKQ)?Z-0x-7NE?kol2zY`>?I@xXM2_4;1uHH3NYX(a4bXY$rVw(9HegO1;y=^tQ4 zHY30kA1fhNZLoek(?1BFPFR0vGadR%SbsQ&^%I%?k<#@0B^h3KKgp*5Xealu`Z!zg zV@1%HtDsJW#P7&_-mV4Fi?8x*I~u~y%N6xG=4MFzXcN&EA&|l0~^1~E^434^lvj#_UKo@ zZ5OrQS*NW2T~)J-+V80zyQp1Ns{Q-3` zv6j$31{Rl)^d~UrPc!{zY!FNRyftX!qE$}=?4XKO>(AllFJ$_QrJaQ$qQ8ww{{@HY zUyh3Id{F<@>}4?JrcE|LdrQ;*x?`>3RkG&eXkO8OgGs-X>Az)Ac0$q=&Z$#p=VfS5 zJ7@LZLH94@h={%xpZDQoA6HWB6l z;i+)GwgTNM`d^{(S2F!?rPwbQ+U;m3)Bny2{>P~30S_O0X1BpO+}nX=mqPv33+O`N zrE3?j|Fmb>(r)|4i<;Q1T1Jz2`fJ7Msvk^pB@X$bS1y;2oUwv3n~wW(yUE_fu7E??15iw9uKOB z1FP3npk654y$AOh?d7>up&E4AXPyTkvv$4kZO zT8zQt`|+32b-0!uXCaE zpWMXFJbsuZeIp*XS{(s(>G4V2XY>r786LNa!Q;2s=iKAB@_p`co@SlLZ^OWPkF%j{ z`R)9FWcH%kbQB}-_cQUA(G;$wzgdM%{f!lte_RMee?o3(<|1mM`|>Ps%!q+jvjerp z(6g=J=in-OF~BDXL(jE?&*Q-%v)ZgjL9OI6^n46g=@`EpUe`Ra@?K!y9_P20Z(yxi 
zf;%k13z?t~<<_<}4Bcr*@4}UrII*Mb+i=U2UW8$EYFf6G-Dg85EZd85W$kUFiD=rA z_xfJm&Y`>UjE>GgyRwE(TGkn69Vqq4HM|F78QsfohPa!6ZM(nkX#TrcEHEX_>KjIMU1-P9DnHXa04Hy;X4}b+|4O=~! zf@Yw`OEk`8Ja)t3TRb9u9F{9Snx4q6)D$Qh7TqPf(XgBI=P9uwt3HC7<#!KA7S+HeabfE3sfGqGW+u$E_T-8BAv6ugPad-ft=aV zof|wxufVYAN|U*f-GcaP z%iD)agTCdQze2BJGEOw}bD%Y;>o265Ss~l6I1P758s9yd^1QEY;Fv44X$0p>n+T@)%WOez8OR^ zwEO+77+e#3^1PRU^hI$ zj<-n2etfP(Qs-k&=3@gEl~!4Q2MbT@V)w>{%y!u6oy@ewe*L3&;Xb2x<2tHpIB!0L z^Y?XXS1Lx v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -html_logo = "_static/pyslurm-docs.png" - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. 
-#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'PySlurmDoc' - - -# -- Options for LaTeX output -------------------------------------------------- - -# The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [ - ('index', 'PySlurm.tex', 'PySlurm Documentation', - 'Mark Roberts, Giovanni Torres', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Additional stuff for the LaTeX preamble. -#latex_preamble = '' - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output -------------------------------------------- - -# One entry per manual page. 
List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'pyslurm', 'PySlurm Documentation', - ['Mark Roberts, Giovanni Torres'], 1) -] - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index b76e46c6..00000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,118 +0,0 @@ -.. PySlurm documentation master file, created by - sphinx-quickstart on Thu Sep 8 18:50:27 2011. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -PySlurm: Slurm Interface to python -=================================== - -:Authors: Mark Roberts, Giovanni Torres -:Date: |today| -:Version: |version| - -This module provides a low-level Python wrapper around the Slurm C-API using Cython. - -Contents -======== - -.. toctree:: - :maxdepth: 2 - :numbered: - -Config Class -************ - -.. autoclass:: pyslurm.config - :members: - -FrontEnd Class -************** - -.. autoclass:: pyslurm.front_end - :members: - -HostList Class -************** - -.. autoclass:: pyslurm.hostlist - :members: - -Job Class -********* - -.. autoclass:: pyslurm.job - :members: - -JobStep Class -************* - -.. autoclass:: pyslurm.jobstep - :members: - -Node Class -********** - -.. autoclass:: pyslurm.node - :members: - -Partition Class -*************** - -.. autoclass:: pyslurm.partition - :members: - -Reservation Class -***************** - -.. autoclass:: pyslurm.reservation - :members: - -Slurmdb Events Class -******************** - -.. autoclass:: pyslurm.slurmdb_events - :members: - -Slurmdb Reservations Class -************************** - -.. autoclass:: pyslurm.slurmdb_reservations - :members: - -Slurmdb Clusters Class -********************** - -.. 
autoclass:: pyslurm.slurmdb_clusters - :members: - -Slurmdb Jobs Class -****************** - -.. autoclass:: pyslurm.slurmdb_jobs - :members: - -Statistics Class -**************** - -.. autoclass:: pyslurm.statistics - :members: - -Topology Class -************** - -.. autoclass:: pyslurm.topology - :members: - -Trigger Class -************* - -.. autoclass:: pyslurm.trigger - :members: - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` - diff --git a/doc_requirements.txt b/doc_requirements.txt new file mode 100644 index 00000000..7497dfa3 --- /dev/null +++ b/doc_requirements.txt @@ -0,0 +1,6 @@ +cython>=3.0.0b1 +wheel +setuptools +mkdocstrings[python] +mike +mkdocs-material diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 00000000..a3097617 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,52 @@ +--- +hide: + - navigation +--- +# PySlurm: Slurm Interface to python + +This module provides a low-level Python wrapper around the Slurm C-API using Cython. 
+ +::: pyslurm.config + handler: python + +::: pyslurm.front_end + handler: python + +::: pyslurm.hostlist + handler: python + +::: pyslurm.job + handler: python + +::: pyslurm.jobstep + handler: python + +::: pyslurm.node + handler: python + +::: pyslurm.partition + handler: python + +::: pyslurm.reservation + handler: python + +::: pyslurm.slurmdb_events + handler: python + +::: pyslurm.slurmdb_reservations + handler: python + +::: pyslurm.slurmdb_clusters + handler: python + +::: pyslurm.slurmdb_jobs + handler: python + +::: pyslurm.statistics + handler: python + +::: pyslurm.topology + handler: python + +::: pyslurm.trigger + handler: python \ No newline at end of file diff --git a/doc/source/_static/pyslurm-docs.png b/docs/pyslurm-docs.png similarity index 100% rename from doc/source/_static/pyslurm-docs.png rename to docs/pyslurm-docs.png diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css new file mode 100644 index 00000000..18fe35d6 --- /dev/null +++ b/docs/stylesheets/extra.css @@ -0,0 +1,4 @@ +/* Maximum space for text block */ +.md-grid { + max-width: 70%; +} diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 00000000..8e18475a --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,19 @@ +site_name: pyslurm +theme: + name: "material" + logo: pyslurm-docs.png +plugins: +- search +- mkdocstrings: + handlers: + python: + options: + filters: ["!^_"] + docstring_style: sphinx + show_signature: true + show_root_heading: true +extra: + version: + provider: mike +extra_css: + - stylesheets/extra.css diff --git a/pyslurm/pyslurm.pyx b/pyslurm/pyslurm.pyx index 58e5e951..84869d05 100644 --- a/pyslurm/pyslurm.pyx +++ b/pyslurm/pyslurm.pyx @@ -1,4 +1,3 @@ -# cython: embedsignature=True # cython: profile=False # cython: language_level=3 # cython: auto_pickle=False @@ -274,10 +273,11 @@ ctypedef struct config_key_pair_t: def get_controllers(): - """Get information about slurm controllers. + """ + Get information about slurm controllers. 
:return: Name of primary controller, Name of backup controllers - :rtype: `tuple` + :rtype: tuple """ cdef: slurm.slurm_conf_t *slurm_ctl_conf_ptr = NULL @@ -305,12 +305,13 @@ def get_controllers(): def is_controller(Host=None): - """Return slurm controller status for host. + """ + Return slurm controller status for host. :param string Host: Name of host to check :returns: None, primary or backup - :rtype: `string` + :rtype: string """ control_machs = get_controllers() if not Host: @@ -326,10 +327,11 @@ def is_controller(Host=None): def slurm_api_version(): - """Return the slurm API version number. + """ + Return the slurm API version number. :returns: version_major, version_minor, version_micro - :rtype: `tuple` + :rtype: tuple """ cdef long version = slurm.SLURM_VERSION_NUMBER @@ -339,10 +341,11 @@ def slurm_api_version(): def slurm_load_slurmd_status(): - """Issue RPC to get and load the status of Slurmd daemon. + """ + Issue RPC to get and load the status of Slurmd daemon. :returns: Slurmd information - :rtype: `dict` + :rtype: dict """ cdef: dict Status = {}, Status_dict = {} @@ -391,7 +394,8 @@ def slurm_init(conf_file=None): slurm.slurm_init(NULL) def slurm_fini(): - """Call at process termination to cleanup internal configuration + """ + Call at process termination to cleanup internal configuration structures. :returns: None @@ -404,7 +408,9 @@ def slurm_fini(): # def get_private_data_list(data): - """Return the list of enciphered Private Data configuration.""" + """ + Return the list of enciphered Private Data configuration. + """ result = [] exponent = 7 @@ -420,7 +426,9 @@ def get_private_data_list(data): return result cdef class config: - """Class to access slurm config Information.""" + """ + Class to access slurm config Information. + """ cdef: slurm.slurm_conf_t *slurm_ctl_conf_ptr @@ -439,32 +447,36 @@ cdef class config: self.__free() def lastUpdate(self): - """Get the time (epoch seconds) the retrieved data was updated. 
+ """ + Get the time (epoch seconds) the retrieved data was updated. :returns: epoch seconds - :rtype: `integer` + :rtype: integer """ return self._lastUpdate def ids(self): - """Return the config IDs from retrieved data. + """ + Return the config IDs from retrieved data. :returns: Dictionary of config key IDs - :rtype: `dict` + :rtype: dict """ return self.__ConfigDict.keys() def find_id(self, char *keyID=''): - """Retrieve config ID data. - + """ + Retrieve config ID data. :param str keyID: Config key string to search :returns: Dictionary of values for given config key - :rtype: `dict` + :rtype: dict """ return self.__ConfigDict.get(keyID, {}) cdef void __free(self): - """Free memory allocated by slurm_load_ctl_conf.""" + """ + Free memory allocated by slurm_load_ctl_conf. + """ if self.__Config_ptr is not NULL: slurm.slurm_free_ctl_conf(self.__Config_ptr) self.__Config_ptr = NULL @@ -472,14 +484,17 @@ cdef class config: self.__lastUpdate = 0 def display_all(self): - """Print slurm control configuration information.""" + """ + Print slurm control configuration information. + """ slurm.slurm_print_ctl_conf(slurm.stdout, self.__Config_ptr) cdef int __load(self) except? -1: - """Load the slurm control configuration information. + """ + Load the slurm control configuration information. :returns: slurm error code - :rtype: `integer` + :rtype: integer """ cdef: slurm.slurm_conf_t *slurm_ctl_conf_ptr = NULL @@ -495,10 +510,11 @@ cdef class config: return errCode def key_pairs(self): - """Return a dict of the slurm control data as key pairs. + """ + Return a dict of the slurm control data as key pairs. :returns: Dictionary of slurm key-pair values - :rtype: `dict` + :rtype: dict """ cdef: void *ret_list = NULL @@ -534,10 +550,11 @@ cdef class config: return keyDict def get(self): - """Return the slurm control configuration information. + """ + Return the slurm control configuration information. 
:returns: Configuration data - :rtype: `dict` + :rtype: dict """ self.__load() self.__get() @@ -545,10 +562,11 @@ cdef class config: return self.__ConfigDict cpdef dict __get(self): - """Get the slurm control configuration information. + """ + Get the slurm control configuration information. :returns: Configuration data - :rtype: `dict` + :rtype: dict """ cdef: void *ret_list = NULL @@ -819,7 +837,9 @@ cdef class config: cdef class partition: - """Class to access/modify Slurm Partition Information.""" + """ + Class to access/modify Slurm Partition Information. + """ cdef: slurm.partition_info_msg_t *_Partition_ptr @@ -836,18 +856,20 @@ cdef class partition: pass def lastUpdate(self): - """Return time (epoch seconds) the partition data was updated. + """ + Return time (epoch seconds) the partition data was updated. :returns: epoch seconds - :rtype: `integer` + :rtype: integer """ return self._lastUpdate def ids(self): - """Return the partition IDs from retrieved data. + """ + Return the partition IDs from retrieved data. :returns: Dictionary of partition IDs - :rtype: `dict` + :rtype: dict """ cdef: int rc @@ -872,21 +894,23 @@ cdef class partition: raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) def find_id(self, partID): - """Get partition information for a given partition. + """ + Get partition information for a given partition. :param str partID: Partition key string to search :returns: Dictionary of values for given partition - :rtype: `dict` + :rtype: dict """ return self.get().get(partID) def find(self, name='', val=''): - """Search for a property and associated value in the retrieved partition data. + """ + Search for a property and associated value in the retrieved partition data. 
        :param str name: key string to search
        :param str value: value string to match
        :returns: List of IDs that match
-        :rtype: `list`
+        :rtype: list
        """
        cdef:
            list retList = []
@@ -901,7 +925,8 @@ cdef class partition:
        return retList

    def print_info_msg(self, int oneLiner=0):
-        """Display the partition information from previous load partition method.
+        """
+        Display the partition information from previous load partition method.

        :param int oneLiner: Display on one line (default=0)
        """
@@ -924,12 +949,13 @@
            raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError)

    def delete(self, PartID):
-        """Delete a give slurm partition.
+        """
+        Delete a given slurm partition.

        :param string PartID: Name of slurm partition
        :returns: 0 for success else set the slurm error code as
            appropriately.
-        :rtype: `integer`
+        :rtype: integer
        """
        cdef:
            slurm.delete_part_msg_t part_msg
@@ -948,10 +974,11 @@
        return errCode

    def get(self):
-        """Get all slurm partition information
+        """
+        Get all slurm partition information

        :returns: Dictionary of dictionaries whose key is the partition name.
-        :rtype: `dict`
+        :rtype: dict
        """
        cdef:
            int rc
@@ -1126,36 +1153,39 @@


    def update(self, dict Partition_dict):
-        """Update a slurm partition.
+        """
+        Update a slurm partition.

        :param dict partition_dict: A populated partition dictionary, an
            empty one is created by create_partition_dict
        :returns: 0 for success, -1 for error, and the slurm error
            code is set appropriately.
-        :rtype: `integer`
+        :rtype: integer
        """
        cdef int errCode = slurm_update_partition(Partition_dict)
        return errCode

    def create(self, dict Partition_dict):
-        """Create a slurm partition.
+        """
+        Create a slurm partition.

        :param dict partition_dict: A populated partition dictionary, an
            empty one can be created by create_partition_dict
        :returns: 0 for success or -1 for error, and the slurm error
            code is set appropriately.
- :rtype: `integer` + :rtype: integer """ cdef int errCode = slurm_create_partition(Partition_dict) return errCode def create_partition_dict(): - """Returns a dictionary that can be populated by the user + """ + Returns a dictionary that can be populated by the user and used for the update_partition and create_partition calls. :returns: Empty reservation dictionary - :rtype: `dict` + :rtype: dict """ return { 'Alternate': None, @@ -1177,13 +1207,14 @@ def create_partition_dict(): def slurm_create_partition(dict partition_dict): - """Create a slurm partition. + """ + Create a slurm partition. :param dict partition_dict: A populated partition dictionary, an empty one is created by create_partition_dict :returns: 0 for success or -1 for error, and the slurm error code is set appropriately. - :rtype: `integer` + :rtype: integer """ cdef: slurm.update_part_msg_t part_msg_ptr @@ -1208,13 +1239,14 @@ def slurm_create_partition(dict partition_dict): def slurm_update_partition(dict partition_dict): - """Update a slurm partition. + """ + Update a slurm partition. :param dict partition_dict: A populated partition dictionary, an empty one is created by create_partition_dict :returns: 0 for success, -1 for error, and the slurm error code is set appropriately. - :rtype: `integer` + :rtype: integer """ cdef: slurm.update_part_msg_t part_msg_ptr @@ -1273,11 +1305,12 @@ def slurm_update_partition(dict partition_dict): def slurm_delete_partition(PartID): - """Delete a slurm partition. + """ + Delete a slurm partition. :param string PartID: Name of slurm partition :returns: 0 for success else set the slurm error code as appropriately. - :rtype: `integer` + :rtype: integer """ cdef: slurm.delete_part_msg_t part_msg @@ -1301,11 +1334,12 @@ def slurm_delete_partition(PartID): cpdef int slurm_ping(int Controller=0) except? -1: - """Issue RPC to check if slurmctld is responsive. + """ + Issue RPC to check if slurmctld is responsive. 
    :param int Controller: 0 for primary (Default=0), 1 for backup, 2 for backup2, ...
    :returns: 0 for success or slurm error code
-    :rtype: `integer`
+    :rtype: integer
    """
    cdef int apiError = 0
    cdef int errCode = slurm.slurm_ping(Controller)
@@ -1318,10 +1352,11 @@ cpdef int slurm_ping(int Controller=0) except? -1:


cpdef int slurm_reconfigure() except? -1:
-    """Issue RPC to have slurmctld reload its configuration file.
+    """
+    Issue RPC to have slurmctld reload its configuration file.

    :returns: 0 for success or a slurm error code
-    :rtype: `integer`
+    :rtype: integer
    """
    cdef int apiError = 0
    cdef int errCode = slurm.slurm_reconfigure()
@@ -1334,7 +1369,8 @@


cpdef int slurm_shutdown(uint16_t Options=0) except? -1:
-    """Issue RPC to have slurmctld cease operations.
+    """
+    Issue RPC to have slurmctld cease operations.

    Both the primary and backup controller are shutdown.

    :param int Options:
        1 - slurmctld generates a core file
        2 - slurmctld is shutdown (no core file)
    :returns: 0 for success or a slurm error code
-    :rtype: `integer`
+    :rtype: integer
    """
    cdef int apiError = 0
    cdef int errCode = slurm.slurm_shutdown(Options)
@@ -1356,12 +1392,13 @@


cpdef int slurm_takeover(int backup_inx) except? -1:
-    """Issue a RPC to have slurmctld backup controller take over.
+    """
+    Issue an RPC to have slurmctld backup controller take over.

    The backup controller takes over the primary controller.

    :returns: 0 for success or a slurm error code
-    :rtype: `integer`
+    :rtype: integer
    """
    cdef int apiError = 0
    cdef int errCode = slurm.slurm_takeover(backup_inx)
@@ -1370,11 +1407,12 @@


cpdef int slurm_set_debug_level(uint32_t DebugLevel=0) except? -1:
-    """Set the slurm controller debug level.
+    """
+    Set the slurm controller debug level.
:param int DebugLevel: 0 (default) to 6 :returns: 0 for success, -1 for error and set slurm error number - :rtype: `integer` + :rtype: integer """ cdef int apiError = 0 cdef int errCode = slurm.slurm_set_debug_level(DebugLevel) @@ -1388,12 +1426,13 @@ cpdef int slurm_set_debug_level(uint32_t DebugLevel=0) except? -1: cpdef int slurm_set_debugflags(uint32_t debug_flags_plus=0, uint32_t debug_flags_minus=0) except? -1: - """Set the slurm controller debug flags. + """ + Set the slurm controller debug flags. :param int debug_flags_plus: debug flags to be added :param int debug_flags_minus: debug flags to be removed :returns: 0 for success, -1 for error and set slurm error number - :rtype: `integer` + :rtype: integer """ cdef int apiError = 0 cdef int errCode = slurm.slurm_set_debugflags(debug_flags_plus, @@ -1407,11 +1446,12 @@ cpdef int slurm_set_debugflags(uint32_t debug_flags_plus=0, cpdef int slurm_set_schedlog_level(uint32_t Enable=0) except? -1: - """Set the slurm scheduler debug level. + """ + Set the slurm scheduler debug level. :param int Enable: True = 0, False = 1 :returns: 0 for success, -1 for error and set the slurm error number - :rtype: `integer` + :rtype: integer """ cdef int apiError = 0 cdef int errCode = slurm.slurm_set_schedlog_level(Enable) @@ -1429,11 +1469,12 @@ cpdef int slurm_set_schedlog_level(uint32_t Enable=0) except? -1: cpdef int slurm_suspend(uint32_t JobID=0) except? -1: - """Suspend a running slurm job. + """ + Suspend a running slurm job. :param int JobID: Job identifier :returns: 0 for success or a slurm error code - :rtype: `integer` + :rtype: integer """ cdef int apiError = 0 cdef int errCode = slurm.slurm_suspend(JobID) @@ -1446,11 +1487,12 @@ cpdef int slurm_suspend(uint32_t JobID=0) except? -1: cpdef int slurm_resume(uint32_t JobID=0) except? -1: - """Resume a running slurm job step. + """ + Resume a running slurm job step. 
:param int JobID: Job identifier :returns: 0 for success or a slurm error code - :rtype: `integer` + :rtype: integer """ cdef int apiError = 0 cdef int errCode = slurm.slurm_resume(JobID) @@ -1463,11 +1505,12 @@ cpdef int slurm_resume(uint32_t JobID=0) except? -1: cpdef int slurm_requeue(uint32_t JobID=0, uint32_t State=0) except? -1: - """Requeue a running slurm job step. + """ + Requeue a running slurm job step. :param int JobID: Job identifier :returns: 0 for success or a slurm error code - :rtype: `integer` + :rtype: integer """ cdef int apiError = 0 cdef int errCode = slurm.slurm_requeue(JobID, State) @@ -1480,11 +1523,12 @@ cpdef int slurm_requeue(uint32_t JobID=0, uint32_t State=0) except? -1: cpdef long slurm_get_rem_time(uint32_t JobID=0) except? -1: - """Get the remaining time in seconds for a slurm job step. + """ + Get the remaining time in seconds for a slurm job step. :param int JobID: Job identifier :returns: Remaining time in seconds or -1 on error - :rtype: `long` + :rtype: long """ cdef int apiError = 0 cdef long errCode = slurm.slurm_get_rem_time(JobID) @@ -1497,11 +1541,12 @@ cpdef long slurm_get_rem_time(uint32_t JobID=0) except? -1: cpdef time_t slurm_get_end_time(uint32_t JobID=0) except? -1: - """Get the end time in seconds for a slurm job step. + """ + Get the end time in seconds for a slurm job step. :param int JobID: Job identifier :returns: Remaining time in seconds or -1 on error - :rtype: `integer` + :rtype: integer """ cdef time_t EndTime = -1 cdef int apiError = 0 @@ -1515,11 +1560,12 @@ cpdef time_t slurm_get_end_time(uint32_t JobID=0) except? -1: cpdef int slurm_job_node_ready(uint32_t JobID=0) except? -1: - """Return if a node could run a slurm job now if dispatched. + """ + Return if a node could run a slurm job now if dispatched. 
:param int JobID: Job identifier :returns: Node Ready code - :rtype: `integer` + :rtype: integer """ cdef int apiError = 0 cdef int errCode = slurm.slurm_job_node_ready(JobID) @@ -1528,12 +1574,13 @@ cpdef int slurm_job_node_ready(uint32_t JobID=0) except? -1: cpdef int slurm_signal_job(uint32_t JobID=0, uint16_t Signal=0) except? -1: - """Send a signal to a slurm job step. + """ + Send a signal to a slurm job step. :param int JobID: Job identifier :param int Signal: Signal to send (default=0) :returns: 0 for success or -1 for error and the set Slurm errno - :rtype: `integer` + :rtype: integer """ cdef int apiError = 0 cdef int errCode = slurm.slurm_signal_job(JobID, Signal) @@ -1552,13 +1599,14 @@ cpdef int slurm_signal_job(uint32_t JobID=0, uint16_t Signal=0) except? -1: cpdef int slurm_signal_job_step(uint32_t JobID=0, uint32_t JobStep=0, uint16_t Signal=0) except? -1: - """Send a signal to a slurm job step. + """ + Send a signal to a slurm job step. :param int JobID: Job identifier :param int JobStep: Job step identifier :param int Signal: Signal to send (default=0) :returns: Error code - 0 for success or -1 for error and set the slurm errno - :rtype: `integer` + :rtype: integer """ cdef int apiError = 0 cdef int errCode = slurm.slurm_signal_job_step(JobID, JobStep, Signal) @@ -1572,13 +1620,14 @@ cpdef int slurm_signal_job_step(uint32_t JobID=0, uint32_t JobStep=0, cpdef int slurm_kill_job(uint32_t JobID=0, uint16_t Signal=0, uint16_t BatchFlag=0) except? -1: - """Terminate a running slurm job step. + """ + Terminate a running slurm job step. 
:param int JobID: Job identifier :param int Signal: Signal to send :param int BatchFlag: Job batch flag (default=0) :returns: 0 for success or -1 for error and set slurm errno - :rtype: `integer` + :rtype: integer """ cdef int apiError = 0 cdef int errCode = slurm.slurm_kill_job(JobID, Signal, BatchFlag) @@ -1592,13 +1641,14 @@ cpdef int slurm_kill_job(uint32_t JobID=0, uint16_t Signal=0, cpdef int slurm_kill_job_step(uint32_t JobID=0, uint32_t JobStep=0, uint16_t Signal=0) except? -1: - """Terminate a running slurm job step. + """ + Terminate a running slurm job step. :param int JobID: Job identifier :param int JobStep: Job step identifier :param int Signal: Signal to send (default=0) :returns: 0 for success or -1 for error, and the slurm error code is set appropriately. - :rtype: `integer` + :rtype: integer """ cdef int apiError = 0 cdef int errCode = slurm.slurm_kill_job_step(JobID, JobStep, Signal) @@ -1612,14 +1662,15 @@ cpdef int slurm_kill_job_step(uint32_t JobID=0, uint32_t JobStep=0, cpdef int slurm_kill_job2(const char *JobID='', uint16_t Signal=0, uint16_t BatchFlag=0, char* sibling=NULL) except? -1: - """Terminate a running slurm job step. + """ + Terminate a running slurm job step. :param const char * JobID: Job identifier :param int Signal: Signal to send :param int BatchFlag: Job batch flag (default=0) :param string sibling: optional string of sibling cluster to send the message to :returns: 0 for success or -1 for error and set slurm errno - :rtype: `integer` + :rtype: integer """ cdef int apiError = 0 cdef int errCode = slurm.slurm_kill_job2(JobID, Signal, BatchFlag, sibling) @@ -1632,12 +1683,13 @@ cpdef int slurm_kill_job2(const char *JobID='', uint16_t Signal=0, cpdef int slurm_complete_job(uint32_t JobID=0, uint32_t JobCode=0) except? -1: - """Complete a running slurm job step. + """ + Complete a running slurm job step. 
:param int JobID: Job identifier :param int JobCode: Return code (default=0) :returns: 0 for success or -1 for error and set slurm errno - :rtype: `integer` + :rtype: integer """ cdef int apiError = 0 cdef int errCode = slurm.slurm_complete_job(JobID, JobCode) @@ -1650,12 +1702,13 @@ cpdef int slurm_complete_job(uint32_t JobID=0, uint32_t JobCode=0) except? -1: cpdef int slurm_notify_job(uint32_t JobID=0, char* Msg='') except? -1: - """Notify a message to a running slurm job step. + """ + Notify a message to a running slurm job step. :param string JobID: Job identifier (default=0) :param string Msg: Message string to send to job :returns: 0 for success or -1 on error - :rtype: `integer` + :rtype: integer """ cdef int apiError = 0 @@ -1669,13 +1722,14 @@ cpdef int slurm_notify_job(uint32_t JobID=0, char* Msg='') except? -1: cpdef int slurm_terminate_job_step(uint32_t JobID=0, uint32_t JobStep=0) except? -1: - """Terminate a running slurm job step. + """ + Terminate a running slurm job step. :param int JobID: Job identifier (default=0) :param int JobStep: Job step identifier (default=0) :returns: 0 for success or -1 for error, and the slurm error code is set appropriately. - :rtype: `integer` + :rtype: integer """ cdef int apiError = 0 cdef int errCode = slurm.slurm_terminate_job_step(JobID, JobStep) @@ -1692,7 +1746,9 @@ cpdef int slurm_terminate_job_step(uint32_t JobID=0, uint32_t JobStep=0) except? cdef class job: - """Class to access/modify Slurm Job Information.""" + """ + Class to access/modify Slurm Job Information. + """ cdef: slurm.job_info_msg_t *_job_ptr @@ -1712,26 +1768,29 @@ cdef class job: pass def lastUpdate(self): - """Get the time (epoch seconds) the job data was updated. + """ + Get the time (epoch seconds) the job data was updated. :returns: epoch seconds - :rtype: `integer` + :rtype: integer """ return self._lastUpdate def lastBackfill(self): - """Get the time (epoch seconds) of last backfilling run. 
+ """ + Get the time (epoch seconds) of last backfilling run. :returns: epoch seconds - :rtype: `integer` + :rtype: integer """ return self._lastBackfill cpdef ids(self): - """Return the job IDs from retrieved data. + """ + Return the job IDs from retrieved data. :returns: Dictionary of job IDs - :rtype: `dict` + :rtype: dict """ cdef: @@ -1754,12 +1813,13 @@ cdef class job: raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) def find(self, name='', val=''): - """Search for a property and associated value in the retrieved job data. + """ + Search for a property and associated value in the retrieved job data. :param str name: key string to search :param str value: value string to match :returns: List of IDs that match - :rtype: `list` + :rtype: list """ cdef: list retList = [] @@ -1809,7 +1869,8 @@ cdef class job: raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) def find_id(self, jobid): - """Retrieve job ID data. + """ + Retrieve job ID data. This method accepts both string and integer formats of the jobid. This works for single jobs and job arrays. It uses the internal @@ -1818,20 +1879,21 @@ cdef class job: :param str jobid: Job id key string to search :returns: List of dictionary of values for given job id - :rtype: `list` + :rtype: list """ self._load_single_job(jobid) return list(self.get_job_ptr().values()) def find_user(self, user): - """Retrieve a user's job data. + """ + Retrieve a user's job data. This method calls slurm_load_job_user to get all job_table records associated with a specific user. :param str user: User string to search :returns: Dictionary of values for all user's jobs - :rtype: `dict` + :rtype: dict """ cdef: int apiError @@ -1855,13 +1917,14 @@ cdef class job: raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) cpdef get(self): - """Get all slurm jobs information. + """ + Get all slurm jobs information. 
This method calls slurm_load_jobs to get job_table records for all jobs :returns: Data where key is the job name, each entry contains a dictionary of job attributes - :rtype: `dict` + :rtype: dict """ cdef: int apiError @@ -1876,10 +1939,11 @@ cdef class job: raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) cdef dict get_job_ptr(self): - """Convert all job arrays in buffer to dictionary. + """ + Convert all job arrays in buffer to dictionary. :returns: dictionary of job attributes - :rtype: `dict` + :rtype: dict """ cdef: char time_str[32] @@ -2211,11 +2275,12 @@ cdef class job: return self._JobDict cpdef int __cpus_allocated_on_node_id(self, int nodeID=0): - """Get the number of cpus allocated to a job on a node by node name. + """ + Get the number of cpus allocated to a job on a node by node name. :param int nodeID: Numerical node ID :returns: Num of CPUs allocated to job on this node or -1 on error - :rtype: `integer` + :rtype: integer """ cdef: slurm.job_resources_t *job_resrcs_ptr = self._record.job_resrcs @@ -2224,11 +2289,12 @@ cdef class job: return retval cdef int __cpus_allocated_on_node(self, char* nodeName=''): - """Get the number of cpus allocated to a slurm job on a node by node name. + """ + Get the number of cpus allocated to a slurm job on a node by node name. :param string nodeName: Name of node :returns: Num of CPUs allocated to job on this node or -1 on error - :rtype: `integer` + :rtype: integer """ cdef: slurm.job_resources_t *job_resrcs_ptr = self._record.job_resrcs @@ -2237,11 +2303,12 @@ cdef class job: return retval cdef list __cpus_allocated_list_on_node(self, char* nodeName=''): - """Get a list of cpu ids allocated to current slurm job on a node by node name. + """ + Get a list of cpu ids allocated to current slurm job on a node by node name. 
:param string nodeName: Name of node :returns: list of allocated cpus (empty, if nothing found or error) - :rtype: `list` + :rtype: list """ cdef: int error = 0 @@ -2262,11 +2329,12 @@ cdef class job: return cpus_list def __unrange(self, bit_str): - """converts a string describing a bitmap (from slurm_job_cpus_allocated_str_on_node()) to a list. + """ + converts a string describing a bitmap (from slurm_job_cpus_allocated_str_on_node()) to a list. :param string bit_str: string describing a bitmap (e.g. "0-30,45,50-60") :returns: list referring to bitmap (empty if not succesful) - :rtype: `list` + :rtype: list """ r_list = [] @@ -2284,12 +2352,15 @@ cdef class job: return r_list cpdef __free(self): - """Release the storage generated by the slurm_get_job_steps function.""" + """ + Release the storage generated by the slurm_get_job_steps function. + """ if self._job_ptr is not NULL: slurm.slurm_free_job_info_msg(self._job_ptr) cpdef print_job_info_msg(self, int oneLiner=0): - """Print the data structure describing all job step records. + """ + Print the data structure describing all job step records. The job step records are loaded by the slurm_get_job_steps function. @@ -2314,15 +2385,15 @@ cdef class job: """ Return the contents of the batch-script for a Job. - Note: The string returned also includes all the "\n" characters - (new-line). + Note: The string returned also includes all the "\\n" characters + (new-line). :param jobid: ID of the Job for which the script should be retrieved. :type jobid: Union[str, int] :raises: [ValueError]: When retrieving the Batch-Script for the Job was not successful. :returns: The content of the batch script. - :rtype: `str` + :rtype: str """ # This reimplements the slurm_job_batch_script API call. 
Otherwise we # would have to parse the FILE* ptr we get from it back into a @@ -2744,7 +2815,9 @@ cdef class job: return 0 cdef int envcount(self, char **env): - """Return the number of elements in the environment `env`.""" + """ + Return the number of elements in the environment `env`. + """ cdef int envc = 0 while (env[envc] != NULL): envc += 1 @@ -2791,7 +2864,8 @@ cdef class job: return rc def submit_batch_job(self, job_opts): - """Submit batch job. + """ + Submit batch job. * make sure options match sbatch command line opts and not struct member names. """ cdef: @@ -2959,7 +3033,7 @@ cdef class job: To reference a job with job array set, use the first/"master" jobid (the same as given by squeue) :returns: The exit code of the slurm job. - :rtype: `int` + :rtype: int """ exit_status = -9999 complete = False @@ -2985,13 +3059,14 @@ cdef class job: def slurm_pid2jobid(uint32_t JobPID=0): - """Get the slurm job id from a process id. + """ + Get the slurm job id from a process id. :param int JobPID: Job process id :returns: 0 for success or a slurm error code - :rtype: `integer` + :rtype: integer :returns: Job Identifier - :rtype: `integer` + :rtype: integer """ cdef: uint32_t JobID = 0 @@ -3006,14 +3081,15 @@ def slurm_pid2jobid(uint32_t JobPID=0): cdef secs2time_str(uint32_t time): - """Convert seconds to Slurm string format. + """ + Convert seconds to Slurm string format. This method converts time in seconds (86400) to Slurm's string format (1-00:00:00). :param int time: time in seconds :returns: time string - :rtype: `str` + :rtype: str """ cdef: char *time_str @@ -3037,14 +3113,15 @@ cdef secs2time_str(uint32_t time): cdef mins2time_str(uint32_t time): - """Convert minutes to Slurm string format. + """ + Convert minutes to Slurm string format. This method converts time in minutes (14400) to Slurm's string format (10-00:00:00). 
:param int time: time in minutes :returns: time string - :rtype: `str` + :rtype: str """ cdef: double days, hours, minutes, seconds @@ -3086,10 +3163,11 @@ class SlurmError(Exception): def slurm_get_errno(): - """Return the slurm error as set by a slurm API call. + """ + Return the slurm error as set by a slurm API call. :returns: slurm error number - :rtype: `integer` + :rtype: integer """ cdef int errNum = slurm.slurm_get_errno() @@ -3097,11 +3175,12 @@ def slurm_get_errno(): def slurm_strerror(int Errno=0): - """Return slurm error message represented by a given slurm error number. + """ + Return slurm error message represented by a given slurm error number. :param int Errno: slurm error number. :returns: slurm error string - :rtype: `string` + :rtype: string """ cdef char* errMsg = slurm.slurm_strerror(Errno) @@ -3109,7 +3188,8 @@ def slurm_strerror(int Errno=0): def slurm_seterrno(int Errno=0): - """Set the slurm error number. + """ + Set the slurm error number. :param int Errno: slurm error number """ @@ -3117,7 +3197,8 @@ def slurm_seterrno(int Errno=0): def slurm_perror(char* Msg=''): - """Print to standard error the supplied header. + """ + Print to standard error the supplied header. Header is followed by a colon, followed by a text description of the last Slurm error code generated. @@ -3134,7 +3215,9 @@ def slurm_perror(char* Msg=''): cdef class node: - """Class to access/modify/update Slurm Node Information.""" + """ + Class to access/modify/update Slurm Node Information. + """ cdef: slurm.node_info_msg_t *_Node_ptr @@ -3153,18 +3236,20 @@ cdef class node: pass def lastUpdate(self): - """Return last time (epoch seconds) the node data was updated. + """ + Return last time (epoch seconds) the node data was updated. :returns: epoch seconds - :rtype: `integer` + :rtype: integer """ return self._lastUpdate cpdef ids(self): - """Return the node IDs from retrieved data. + """ + Return the node IDs from retrieved data. 
:returns: Dictionary of node IDs - :rtype: `dict` + :rtype: dict """ cdef: int rc @@ -3186,19 +3271,21 @@ cdef class node: raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) def find_id(self, nodeID): - """Get node information for a given node. + """ + Get node information for a given node. :param str nodeID: Node key string to search :returns: Dictionary of values for given node - :rtype: `dict` + :rtype: dict """ return list(self.get_node(nodeID).values())[0] def get(self): - """Get all slurm node information. + """ + Get all slurm node information. :returns: Dictionary of dictionaries whose key is the node name. - :rtype: `dict` + :rtype: dict """ return self.get_node(None) @@ -3207,11 +3294,12 @@ cdef class node: return re.split(r',(?![^(]*\))', gres_str) def get_node(self, nodeID): - """Get single slurm node information. + """ + Get single slurm node information. :param str nodeID: Node key string to search. Default NULL. :returns: Dictionary of give node info data. - :rtype: `dict` + :rtype: dict """ cdef: int rc @@ -3435,18 +3523,20 @@ cdef class node: cpdef update(self, dict node_dict): - """Update slurm node information. + """ + Update slurm node information. :param dict node_dict: A populated node dictionary, an empty one is created by create_node_dict :returns: 0 for success or -1 for error, and the slurm error code is set appropriately. - :rtype: `integer` + :rtype: integer """ return slurm_update_node(node_dict) cpdef print_node_info_msg(self, int oneLiner=False): - """Output information about all slurm nodes. + """ + Output information about all slurm nodes. :param int oneLiner: Print on one line - False (Default) or True """ @@ -3467,13 +3557,14 @@ cdef class node: def slurm_update_node(dict node_dict): - """Update slurm node information. + """ + Update slurm node information. 
    :param dict node_dict: A populated node dictionary, an
        empty one is created by create_node_dict
    :returns: 0 for success or -1 for error, and the slurm error
        code is set appropriately.
-    :rtype: `integer`
+    :rtype: integer
    """
    cdef:
        slurm.update_node_msg_t node_msg
@@ -3519,13 +3610,14 @@ def slurm_update_node(dict node_dict):


def create_node_dict():
-    """Return a an update_node dictionary
+    """
+    Return an update_node dictionary

    This dictionary can be populated by the user and used for
    the update_node call.

    :returns: Empty node dictionary
-    :rtype: `dict`
+    :rtype: dict
    """
    return {
        'node_names': None,
@@ -3543,7 +3635,9 @@ def create_node_dict():


cdef class jobstep:
-    """Class to access/modify Slurm Jobstep Information."""
+    """
+    Class to access/modify Slurm Jobstep Information.
+    """

    cdef:
        slurm.time_t _lastUpdate
@@ -3562,16 +3656,19 @@ cdef class jobstep:
        self.__destroy()

    cpdef __destroy(self):
-        """Free the slurm job memory allocated by load jobstep method."""
+        """
+        Free the slurm job memory allocated by load jobstep method.
+        """
        self._lastUpdate = 0
        self._ShowFlags = 0
        self._JobStepDict = {}

    def lastUpdate(self):
-        """Get the time (epoch seconds) the jobstep data was updated.
+        """
+        Get the time (epoch seconds) the jobstep data was updated.

        :returns: epoch seconds
-        :rtype: `integer`
+        :rtype: integer
        """
        return self._lastUpdate

@@ -3600,17 +3697,19 @@ cdef class jobstep:
        return retDict

    cpdef get(self):
-        """Get slurm jobstep information.
+        """
+        Get slurm jobstep information.

        :returns: Data whose key is the jobstep ID.
-        :rtype: `dict`
+        :rtype: dict
        """
        self.__get()
        return self._JobStepDict

    cpdef __get(self):
-        """Load details about job steps.
+        """
+        Load details about job steps.
This method loads details about job steps that satisfy the job_id and/or step_id specifications provided if the data has been updated @@ -3620,7 +3719,7 @@ cdef class jobstep: :param int StepID: Jobstep Identifier :param int ShowFlags: Display flags (Default=0) :returns: Data whose key is the job and step ID - :rtype: `dict` + :rtype: dict """ cdef: slurm.job_step_info_response_msg_t *job_step_info_ptr = NULL @@ -3738,12 +3837,13 @@ cdef class jobstep: self._JobStepDict = Steps cpdef layout(self, uint32_t JobID=0, uint32_t StepID=0): - """Get the slurm job step layout from a given job and step id. + """ + Get the slurm job step layout from a given job and step id. :param int JobID: slurm job id (Default=0) :param int StepID: slurm step id (Default=0) :returns: List of job step layout. - :rtype: `list` + :rtype: list """ cdef: slurm.slurm_step_id_t step_id @@ -3797,7 +3897,9 @@ cdef class jobstep: cdef class hostlist: - """Wrapper class for Slurm hostlist functions.""" + """ + Wrapper class for Slurm hostlist functions. + """ cdef slurm.hostlist_t hl @@ -3827,7 +3929,8 @@ cdef class hostlist: return slurm.slurm_hostlist_count(self.hl) cpdef get_list(self): - """Get the list of hostnames composing the hostlist. + """ + Get the list of hostnames composing the hostlist. For example with a hostlist created with "tux[1-3]" -> [ 'tux1', tux2', 'tux3' ]. @@ -3919,11 +4022,12 @@ cdef class hostlist: cdef class trigger: def set(self, dict trigger_dict): - """Set or create a slurm trigger. + """ + Set or create a slurm trigger. :param dict trigger_dict: A populated dictionary of trigger information :returns: 0 for success or -1 for error, and the slurm error code is set appropriately. - :rtype: `integer` + :rtype: integer """ cdef: slurm.trigger_info_t trigger_set @@ -3995,10 +4099,11 @@ cdef class trigger: return 0 def get(self): - """Get the information on slurm triggers. + """ + Get the information on slurm triggers. 
:returns: Where key is the trigger ID - :rtype: `dict` + :rtype: dict """ cdef: slurm.trigger_info_msg_t *trigger_get = NULL @@ -4026,13 +4131,14 @@ cdef class trigger: return Triggers def clear(self, TriggerID=0, UserID=slurm.NO_VAL, ID=0): - """Clear or remove a slurm trigger. + """ + Clear or remove a slurm trigger. :param string TriggerID: Trigger Identifier :param string UserID: User Identifier :param string ID: Job Identifier :returns: 0 for success or a slurm error code - :rtype: `integer` + :rtype: integer """ cdef: slurm.trigger_info_t trigger_clear @@ -4063,7 +4169,9 @@ cdef class trigger: cdef class reservation: - """Class to access/update/delete slurm reservation Information.""" + """ + Class to access/update/delete slurm reservation Information. + """ cdef: slurm.reserve_info_msg_t *_Res_ptr @@ -4081,37 +4189,41 @@ cdef class reservation: self.__free() def lastUpdate(self): - """Get the time (epoch seconds) the reservation data was updated. + """ + Get the time (epoch seconds) the reservation data was updated. :returns: epoch seconds - :rtype: `integer` + :rtype: integer """ return self._lastUpdate def ids(self): - """Return a list of reservation IDs from retrieved data. + """ + Return a list of reservation IDs from retrieved data. :returns: Dictionary of reservation IDs - :rtype: `dict` + :rtype: dict """ return self._ResDict.keys() def find_id(self, resID): - """Retrieve reservation ID data. + """ + Retrieve reservation ID data. :param str resID: Reservation key string to search :returns: Dictionary of values for given reservation key - :rtype: `dict` + :rtype: dict """ return self._ResDict.get(resID, {}) def find(self, name='', val=''): - """Search for property and associated value in reservation data. + """ + Search for property and associated value in reservation data. 
:param str name: key string to search :param str value: value string to match :returns: List of IDs that match - :rtype: `list` + :rtype: list """ # [ key for key, value in self._ResDict.items() if self._ResDict[key]['state'] == 'error'] @@ -4127,7 +4239,9 @@ cdef class reservation: self.__load() cdef int __load(self) except? -1: - """Load slurm reservation information.""" + """ + Load slurm reservation information. + """ cdef: slurm.reserve_info_msg_t *new_reserve_info_ptr = NULL @@ -4157,16 +4271,19 @@ cdef class reservation: return errCode cdef __free(self): - """Free slurm reservation pointer.""" + """ + Free slurm reservation pointer. + """ if self._Res_ptr is not NULL: slurm.slurm_free_reservation_info_msg(self._Res_ptr) def get(self): - """Get slurm reservation information. + """ + Get slurm reservation information. :returns: Data whose key is the Reservation ID - :rtype: `dict` + :rtype: dict """ self.load() self.__get() @@ -4207,27 +4324,32 @@ cdef class reservation: self._ResDict = Reservations def create(self, dict reservation_dict={}): - """Create slurm reservation.""" + """ + Create slurm reservation. + """ return slurm_create_reservation(reservation_dict) def delete(self, ResID): - """Delete slurm reservation. + """ + Delete slurm reservation. :returns: 0 for success or a slurm error code - :rtype: `integer` + :rtype: integer """ return slurm_delete_reservation(ResID) def update(self, dict reservation_dict={}): - """Update a slurm reservation attributes. + """ + Update a slurm reservation attributes. :returns: 0 for success or -1 for error, and the slurm error code is set appropriately. - :rtype: `integer` + :rtype: integer """ return slurm_update_reservation(reservation_dict) def print_reservation_info_msg(self, int oneLiner=0): - """Output information about all slurm reservations. + """ + Output information about all slurm reservations. 
:param int Flags: Print on one line - 0 (Default) or 1 """ @@ -4241,13 +4363,14 @@ cdef class reservation: def slurm_create_reservation(dict reservation_dict={}): - """Create a slurm reservation. + """ + Create a slurm reservation. :param dict reservation_dict: A populated reservation dictionary, an empty one is created by create_reservation_dict :returns: 0 for success or -1 for error, and the slurm error code is set appropriately. - :rtype: `string` + :rtype: string """ cdef: slurm.resv_desc_msg_t resv_msg @@ -4341,13 +4464,14 @@ def slurm_create_reservation(dict reservation_dict={}): return resID def slurm_update_reservation(dict reservation_dict={}): - """Update a slurm reservation. + """ + Update a slurm reservation. :param dict reservation_dict: A populated reservation dictionary, an empty one is created by create_reservation_dict :returns: 0 for success or -1 for error, and the slurm error code is set appropriately. - :rtype: `integer` + :rtype: integer """ cdef: slurm.resv_desc_msg_t resv_msg @@ -4435,11 +4559,12 @@ def slurm_update_reservation(dict reservation_dict={}): def slurm_delete_reservation(ResID): - """Delete a slurm reservation. + """ + Delete a slurm reservation. :param string ResID: Reservation Identifier :returns: 0 for success or -1 for error, and the slurm error code is set appropriately. - :rtype: `integer` + :rtype: integer """ cdef slurm.reservation_name_msg_t resv_msg @@ -4460,13 +4585,14 @@ def slurm_delete_reservation(ResID): def create_reservation_dict(): - """Create and empty dict for use with create_reservation method. + """ + Create and empty dict for use with create_reservation method. Returns a dictionary that can be populated by the user an used for the update_reservation and create_reservation calls. 
:returns: Empty Reservation dictionary - :rtype: `dict` + :rtype: dict """ return { 'start_time': 0, @@ -4490,7 +4616,9 @@ def create_reservation_dict(): cdef class topology: - """Class to access/update slurm topology information.""" + """ + Class to access/update slurm topology information. + """ cdef: slurm.topo_info_response_msg_t *_topo_info_ptr @@ -4504,24 +4632,31 @@ cdef class topology: self.__free() def lastUpdate(self): - """Get the time (epoch seconds) the retrieved data was updated. + """ + Get the time (epoch seconds) the retrieved data was updated. :returns: epoch seconds - :rtype: `integer` + :rtype: integer """ return self._lastUpdate cpdef __free(self): - """Free the memory returned by load method.""" + """ + Free the memory returned by load method. + """ if self._topo_info_ptr is not NULL: slurm.slurm_free_topo_info_msg(self._topo_info_ptr) def load(self): - """Load slurm topology information.""" + """ + Load slurm topology information. + """ self.__load() cpdef int __load(self) except? -1: - """Load slurm topology.""" + """ + Load slurm topology. + """ cdef int apiError = 0 cdef int errCode = 0 @@ -4537,10 +4672,11 @@ cdef class topology: return errCode def get(self): - """Get slurm topology information. + """ + Get slurm topology information. :returns: Dictionary whose key is the Topology ID - :rtype: `dict` + :rtype: dict """ self.__load() self.__get() @@ -4570,11 +4706,14 @@ cdef class topology: self._TopoDict = Topo def display(self): - """Display topology information to standard output.""" + """ + Display topology information to standard output. + """ self._print_topo_info_msg() cpdef _print_topo_info_msg(self): - """Output information about topology based upon message as loaded using slurm_load_topo. + """ + Output information about topology based upon message as loaded using slurm_load_topo. 
:param int Flags: Print on one line - False (Default), True """ @@ -4605,9 +4744,10 @@ cdef class statistics: pass cpdef dict get(self): - """Get slurm statistics information. + """ + Get slurm statistics information. - :rtype: `dict` + :rtype: dict """ cdef: int errCode @@ -4704,7 +4844,8 @@ cdef class statistics: raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) cpdef int reset(self): - """Reset scheduling statistics + """ + Reset scheduling statistics This method required root privileges. """ @@ -4981,7 +5122,9 @@ cdef class statistics: cdef class front_end: - """Class to access/update slurm front end node information.""" + """ + Class to access/update slurm front end node information. + """ cdef: slurm.time_t Time @@ -5001,16 +5144,22 @@ cdef class front_end: self.__destroy() cpdef __destroy(self): - """Free the memory allocated by load front end node method.""" + """ + Free the memory allocated by load front end node method. + """ if self._FrontEndNode_ptr is not NULL: slurm.slurm_free_front_end_info_msg(self._FrontEndNode_ptr) def load(self): - """Load slurm front end node information.""" + """ + Load slurm front end node information. + """ self.__load() cdef int __load(self) except? -1: - """Load slurm front end node.""" + """ + Load slurm front end node. + """ cdef: # slurm.front_end_info_msg_t *new_FrontEndNode_ptr = NULL time_t last_time = NULL @@ -5031,26 +5180,29 @@ cdef class front_end: return errCode def lastUpdate(self): - """Return last time (sepoch seconds) the node data was updated. + """ + Return last time (sepoch seconds) the node data was updated. :returns: epoch seconds - :rtype: `integer` + :rtype: integer """ return self._lastUpdate def ids(self): - """Return the node IDs from retrieved data. + """ + Return the node IDs from retrieved data. :returns: Dictionary of node IDs - :rtype: `dict` + :rtype: dict """ return list(self._FrontEndDict.keys()) def get(self): - """Get front end node information. 
+ """ + Get front end node information. :returns: Dictionary whose key is the Topology ID - :rtype: `dict` + :rtype: dict """ self.__load() self.__get() @@ -5093,7 +5245,9 @@ cdef class front_end: cdef class qos: - """Class to access/update slurm QOS information.""" + """ + Class to access/update slurm QOS information. + """ cdef: void *dbconn @@ -5108,16 +5262,22 @@ cdef class qos: self.__destroy() cdef __destroy(self): - """QOS Destructor method.""" + """ + QOS Destructor method. + """ self._QOSDict = {} def load(self): - """Load slurm QOS information.""" + """ + Load slurm QOS information. + """ self.__load() cdef int __load(self) except? -1: - """Load slurm QOS list.""" + """ + Load slurm QOS list. + """ cdef: slurm.slurmdb_qos_cond_t *new_qos_cond = NULL int apiError = 0 @@ -5134,26 +5294,29 @@ cdef class qos: return 0 def lastUpdate(self): - """Return last time (sepoch seconds) the QOS data was updated. + """ + Return last time (sepoch seconds) the QOS data was updated. :returns: epoch seconds - :rtype: `integer` + :rtype: integer """ return self._lastUpdate def ids(self): - """Return the QOS IDs from retrieved data. + """ + Return the QOS IDs from retrieved data. :returns: Dictionary of QOS IDs - :rtype: `dict` + :rtype: dict """ return self._QOSDict.keys() def get(self): - """Get slurm QOS information. + """ + Get slurm QOS information. :returns: Dictionary whose key is the QOS ID - :rtype: `dict` + :rtype: dict """ self.__load() self.__get() @@ -5233,7 +5396,9 @@ cdef class qos: # slurmdbd jobs Class # cdef class slurmdb_jobs: - """Class to access Slurmdbd Jobs information.""" + """ + Class to access Slurmdbd Jobs information. + """ cdef: void* db_conn @@ -5248,7 +5413,8 @@ cdef class slurmdb_jobs: slurm.slurmdb_connection_close(&self.db_conn) def get(self, jobids=[], userids=[], starttime=0, endtime=0, flags = None, db_flags = None, clusters = []): - """Get Slurmdb information about some jobs. + """ + Get Slurmdb information about some jobs. 
Input formats for start and end times: * today or tomorrow @@ -5265,7 +5431,7 @@ cdef class slurmdb_jobs: :param starttime: Select jobs eligible after this timestamp :param endtime: Select jobs eligible before this timestamp :returns: Dictionary whose key is the JOBS ID - :rtype: `dict` + :rtype: dict """ cdef: int i = 0 @@ -5488,7 +5654,9 @@ cdef class slurmdb_jobs: # slurmdbd Reservations Class # cdef class slurmdb_reservations: - """Class to access Slurmdbd reservations information.""" + """ + Class to access Slurmdbd reservations information. + """ cdef: void *dbconn @@ -5501,7 +5669,8 @@ cdef class slurmdb_reservations: slurm.slurmdb_destroy_reservation_cond(self.reservation_cond) def set_reservation_condition(self, start_time, end_time): - """Limit the next get() call to reservations that start after and before a certain time. + """ + Limit the next get() call to reservations that start after and before a certain time. :param start_time: Select reservations that start after this timestamp :param end_time: Select reservations that end before this timestamp @@ -5517,10 +5686,11 @@ cdef class slurmdb_reservations: raise MemoryError() def get(self): - """Get slurm reservations information. + """ + Get slurm reservations information. :returns: Dictionary whose keys are the reservations ids - :rtype: `dict` + :rtype: dict """ cdef: slurm.List reservation_list @@ -5589,7 +5759,9 @@ cdef class slurmdb_reservations: # slurmdbd clusters Class # cdef class slurmdb_clusters: - """Class to access Slurmdbd Clusters information.""" + """ + Class to access Slurmdbd Clusters information. + """ cdef: void *db_conn @@ -5605,7 +5777,8 @@ cdef class slurmdb_clusters: slurm.slurmdb_connection_close(&self.db_conn) def set_cluster_condition(self, start_time, end_time): - """Limit the next get() call to clusters that existed after and before + """ + Limit the next get() call to clusters that existed after and before a certain time. 
:param start_time: Select clusters that existed after this timestamp @@ -5624,10 +5797,11 @@ cdef class slurmdb_clusters: raise MemoryError() def get(self): - """Get slurm clusters information. + """ + Get slurm clusters information. :returns: Dictionary whose keys are the clusters ids - :rtype: `dict` + :rtype: dict """ cdef: slurm.List clusters_list @@ -5705,7 +5879,9 @@ cdef class slurmdb_clusters: # slurmdbd Events Class # cdef class slurmdb_events: - """Class to access Slurmdbd events information.""" + """ + Class to access Slurmdbd events information. + """ cdef: void *dbconn @@ -5718,7 +5894,8 @@ cdef class slurmdb_events: slurm.slurmdb_destroy_event_cond(self.event_cond) def set_event_condition(self, start_time, end_time): - """Limit the next get() call to conditions that existed after and before a certain time. + """ + Limit the next get() call to conditions that existed after and before a certain time. :param start_time: Select conditions that existed after this timestamp :param end_time: Select conditions that existed before this timestamp @@ -5734,10 +5911,11 @@ cdef class slurmdb_events: raise MemoryError() def get(self): - """Get slurm events information. + """ + Get slurm events information. :returns: Dictionary whose keys are the events ids - :rtype: `dict` + :rtype: dict """ cdef: slurm.List event_list @@ -5783,7 +5961,9 @@ cdef class slurmdb_events: # cdef class slurmdb_reports: - """Class to access Slurmdbd reports.""" + """ + Class to access Slurmdbd reports. + """ cdef: void *db_conn @@ -5888,12 +6068,13 @@ cdef class slurmdb_reports: def get_last_slurm_error(): - """Get and return the last error from a slurm API call. + """ + Get and return the last error from a slurm API call. 
:returns: Slurm error number and the associated error string - :rtype: `integer` + :rtype: integer :returns: Slurm error string - :rtype: `string` + :rtype: string """ rc = slurm.slurm_get_errno() @@ -5903,11 +6084,12 @@ def get_last_slurm_error(): return (rc, slurm.stringOrNone(slurm.slurm_strerror(rc), '')) cdef inline dict __get_licenses(char *licenses): - """Returns a dict of licenses from the slurm license string. + """ + Returns a dict of licenses from the slurm license string. :param string licenses: String containing license information :returns: Dictionary of licenses and associated value. - :rtype: `dict` + :rtype: dict """ if (licenses is NULL): return {} @@ -5931,17 +6113,19 @@ cdef inline dict __get_licenses(char *licenses): def get_node_use(inx): - """Returns a string that represents the block node mode. + """ + Returns a string that represents the block node mode. :param int ResType: Slurm block node usage :returns: Block node usage string - :rtype: `string` + :rtype: string """ return slurm.slurm_node_state_string(inx) def get_trigger_res_type(uint16_t inx): - """Returns a string that represents the slurm trigger res type. + """ + Returns a string that represents the slurm trigger res type. :param int ResType: Slurm trigger res state - TRIGGER_RES_TYPE_JOB 1 @@ -5952,7 +6136,7 @@ def get_trigger_res_type(uint16_t inx): - TRIGGER_RES_TYPE_FRONT_END 6 - TRIGGER_RES_TYPE_OTHER 7 :returns: Trigger reservation state string - :rtype: `string` + :rtype: string """ return __get_trigger_res_type(inx) @@ -5978,7 +6162,8 @@ cdef inline object __get_trigger_res_type(uint16_t ResType): def get_trigger_type(uint32_t inx): - """Returns a string that represents the state of the slurm trigger. + """ + Returns a string that represents the state of the slurm trigger. 
:param int TriggerType: Slurm trigger type - TRIGGER_TYPE_UP 0x00000001 @@ -6002,7 +6187,7 @@ def get_trigger_type(uint32_t inx): - TRIGGER_TYPE_PRI_DB_RES_OP 0x00080000 - TRIGGER_TYPE_BURST_BUFFER 0x00100000 :returns: Trigger state string - :rtype: `string` + :rtype: string """ return __get_trigger_type(inx) @@ -6077,7 +6262,7 @@ cdef inline object __get_trigger_type(uint32_t TriggerType): # - RESERVE_FLAG_TIME_FLOAT 0x00020000 # - RESERVE_FLAG_REPLACE 0x00040000 # :returns: Reservation state string -# :rtype: `string` +# :rtype: string # """ # try: # return slurm.slurm_reservation_flags_string(inx) @@ -6090,7 +6275,7 @@ def get_debug_flags(uint64_t inx): :param int flags: Slurm debug flags :returns: Debug flag string - :rtype: `string` + :rtype: string """ return debug_flags2str(inx) @@ -6236,21 +6421,23 @@ cdef inline list debug_flags2str(uint64_t debug_flags): def get_node_state(uint32_t inx): - """Returns a string that represents the state of the slurm node. + """ + Returns a string that represents the state of the slurm node. :param int inx: Slurm node state :returns: Node state string - :rtype: `string` + :rtype: string """ return slurm.slurm_node_state_string(inx) def get_rm_partition_state(int inx): - """Returns a string that represents the partition state. + """ + Returns a string that represents the partition state. :param int inx: Slurm partition state :returns: Partition state string - :rtype: `string` + :rtype: string """ return __get_rm_partition_state(inx) @@ -6276,7 +6463,8 @@ cdef inline object __get_rm_partition_state(int inx): def get_preempt_mode(uint16_t inx): - """Returns a string that represents the preempt mode. + """ + Returns a string that represents the preempt mode. 
:param int inx: Slurm preempt mode - PREEMPT_MODE_OFF 0x0000 @@ -6285,13 +6473,14 @@ def get_preempt_mode(uint16_t inx): - PREEMPT_MODE_CANCEL 0x0008 - PREEMPT_MODE_GANG 0x8000 :returns: Preempt mode string - :rtype: `string` + :rtype: string """ return slurm.slurm_preempt_mode_string(inx) def get_partition_state(uint16_t inx): - """Returns a string that represents the state of the slurm partition. + """ + Returns a string that represents the state of the slurm partition. :param int inx: Slurm partition state - PARTITION_DOWN 0x01 @@ -6299,7 +6488,7 @@ def get_partition_state(uint16_t inx): - PARTITION_DRAIN 0x02 - PARTITION_INACTIVE 0x00 :returns: Partition state string - :rtype: `string` + :rtype: string """ state = "" if inx: @@ -6317,12 +6506,13 @@ def get_partition_state(uint16_t inx): return state cdef inline object __get_partition_state(int inx, int extended=0): - """Returns a string that represents the state of the partition. + """ + Returns a string that represents the state of the partition. :param int inx: Slurm partition type :param int extended: :returns: Partition state - :rtype: `string` + :rtype: string """ cdef: int drain_flag = (inx & 0x0200) @@ -6374,11 +6564,12 @@ cdef inline object __get_partition_state(int inx, int extended=0): def get_partition_mode(uint16_t flags=0, uint16_t max_share=0): - """Returns a string represents the state of the partition mode. + """ + Returns a string represents the state of the partition mode. :param int inx: Slurm partition mode :returns: Partition mode string - :rtype: `string` + :rtype: string """ return __get_partition_mode(flags, max_share) @@ -6431,7 +6622,8 @@ cdef inline dict __get_partition_mode(uint16_t flags=0, uint16_t max_share=0): def get_job_state(inx): - """Return the state of the slurm job state. + """ + Return the state of the slurm job state. 
:param int inx: Slurm job state - JOB_PENDING 0 @@ -6448,7 +6640,7 @@ def get_job_state(inx): - JOB_OOM 12 - JOB_END :returns: Job state string - :rtype: `string` + :rtype: string """ try: job_state = slurm.stringOrNone(slurm.slurm_job_state_string(inx), '') @@ -6458,22 +6650,24 @@ def get_job_state(inx): def get_job_state_reason(inx): - """Returns a reason why the slurm job is in a provided state. + """ + Returns a reason why the slurm job is in a provided state. :param int inx: Slurm job state reason :returns: Reason string - :rtype: `string` + :rtype: string """ job_reason = slurm.stringOrNone(slurm.slurm_job_reason_string(inx), '') return job_reason def epoch2date(epochSecs): - """Convert epoch secs to a python time string. + """ + Convert epoch secs to a python time string. :param int epochSecs: Seconds since epoch :returns: Date - :rtype: `string` + :rtype: string """ try: dateTime = p_time.gmtime(epochSecs) @@ -6509,7 +6703,9 @@ class Dict(defaultdict): cdef class licenses: - """Class to access slurm controller license information.""" + """ + Class to access slurm controller license information. + """ cdef: slurm.license_info_msg_t *_msg @@ -6523,26 +6719,30 @@ cdef class licenses: self._lastUpdate = NULL def __dealloc__(self): - """Free the memory allocated by load licenses method.""" + """ + Free the memory allocated by load licenses method. + """ pass def lastUpdate(self): - """Return last time (epoch seconds) license data was updated. + """ + Return last time (epoch seconds) license data was updated. :returns: epoch seconds - :rtype: `integer` + :rtype: integer """ return self._lastUpdate def ids(self): - """Return the current license names from retrieved license data. + """ + Return the current license names from retrieved license data. This method calls slurm_load_licenses to retrieve license information from the controller. slurm_free_license_info_msg is used to free the license message buffer. 
:returns: Dictionary of licenses - :rtype: `dict` + :rtype: dict """ cdef: int rc @@ -6567,14 +6767,15 @@ cdef class licenses: raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) cpdef get(self): - """Get full license information from the slurm controller. + """ + Get full license information from the slurm controller. This method calls slurm_load_licenses to retrieve license information from the controller. slurm_free_license_info_msg is used to free the license message buffer. :returns: Dictionary whose key is the license name - :rtype: `dict` + :rtype: dict """ cdef: int rc diff --git a/pyslurm/slurm/header.pxi b/pyslurm/slurm/header.pxi index 8dc36a95..7de32bf2 100644 --- a/pyslurm/slurm/header.pxi +++ b/pyslurm/slurm/header.pxi @@ -720,7 +720,7 @@ cdef extern from "slurm/slurm.h": unsigned char type unsigned char hash[32] - cpdef enum job_states: + cdef enum job_states: JOB_PENDING JOB_RUNNING JOB_SUSPENDED @@ -735,7 +735,7 @@ cdef extern from "slurm/slurm.h": JOB_OOM JOB_END - cpdef enum job_state_reason: + cdef enum job_state_reason: WAIT_NO_REASON WAIT_PRIORITY WAIT_DEPENDENCY @@ -936,25 +936,25 @@ cdef extern from "slurm/slurm.h": WAIT_QOS_MIN_BILLING WAIT_RESV_DELETED - cpdef enum job_acct_types: + cdef enum job_acct_types: JOB_START JOB_STEP JOB_SUSPEND JOB_TERMINATED - cpdef enum auth_plugin_type: + cdef enum auth_plugin_type: AUTH_PLUGIN_NONE AUTH_PLUGIN_MUNGE AUTH_PLUGIN_JWT - cpdef enum hash_plugin_type: + cdef enum hash_plugin_type: HASH_PLUGIN_DEFAULT HASH_PLUGIN_NONE HASH_PLUGIN_K12 HASH_PLUGIN_SHA256 HASH_PLUGIN_CNT - cpdef enum select_plugin_type: + cdef enum select_plugin_type: SELECT_PLUGIN_CONS_RES SELECT_PLUGIN_LINEAR SELECT_PLUGIN_SERIAL @@ -963,27 +963,27 @@ cdef extern from "slurm/slurm.h": SELECT_PLUGIN_CONS_TRES SELECT_PLUGIN_CRAY_CONS_TRES - cpdef enum switch_plugin_type: + cdef enum switch_plugin_type: SWITCH_PLUGIN_NONE SWITCH_PLUGIN_GENERIC SWITCH_PLUGIN_CRAY SWITCH_PLUGIN_SLINGSHOT - cpdef enum 
select_jobdata_type: + cdef enum select_jobdata_type: SELECT_JOBDATA_PAGG_ID SELECT_JOBDATA_PTR SELECT_JOBDATA_CLEANING SELECT_JOBDATA_NETWORK SELECT_JOBDATA_RELEASED - cpdef enum select_nodedata_type: + cdef enum select_nodedata_type: SELECT_NODEDATA_SUBCNT SELECT_NODEDATA_PTR SELECT_NODEDATA_MEM_ALLOC SELECT_NODEDATA_TRES_ALLOC_FMT_STR SELECT_NODEDATA_TRES_ALLOC_WEIGHTED - cpdef enum select_print_mode: + cdef enum select_print_mode: SELECT_PRINT_HEAD SELECT_PRINT_DATA SELECT_PRINT_MIXED @@ -1002,7 +1002,7 @@ cdef extern from "slurm/slurm.h": SELECT_PRINT_RESV_ID SELECT_PRINT_START_LOC - cpdef enum select_node_cnt: + cdef enum select_node_cnt: SELECT_GET_NODE_SCALING SELECT_GET_NODE_CPU_CNT SELECT_GET_MP_CPU_CNT @@ -1011,19 +1011,19 @@ cdef extern from "slurm/slurm.h": SELECT_SET_NODE_CNT SELECT_SET_MP_CNT - cpdef enum acct_gather_profile_info: + cdef enum acct_gather_profile_info: ACCT_GATHER_PROFILE_DIR ACCT_GATHER_PROFILE_DEFAULT ACCT_GATHER_PROFILE_RUNNING - cpdef enum jobacct_data_type: + cdef enum jobacct_data_type: JOBACCT_DATA_TOTAL JOBACCT_DATA_PIPE JOBACCT_DATA_RUSAGE JOBACCT_DATA_TOT_VSIZE JOBACCT_DATA_TOT_RSS - cpdef enum acct_energy_type: + cdef enum acct_energy_type: ENERGY_DATA_JOULES_TASK ENERGY_DATA_STRUCT ENERGY_DATA_RECONFIG @@ -1034,7 +1034,7 @@ cdef extern from "slurm/slurm.h": ENERGY_DATA_NODE_ENERGY_UP ENERGY_DATA_STEP_PTR - cpdef enum task_dist_states: + cdef enum task_dist_states: SLURM_DIST_CYCLIC SLURM_DIST_BLOCK SLURM_DIST_ARBITRARY @@ -1075,7 +1075,7 @@ cdef extern from "slurm/slurm.h": ctypedef task_dist_states task_dist_states_t - cpdef enum cpu_bind_type: + cdef enum cpu_bind_type: CPU_BIND_VERBOSE CPU_BIND_TO_THREADS CPU_BIND_TO_CORES @@ -1097,7 +1097,7 @@ cdef extern from "slurm/slurm.h": ctypedef cpu_bind_type cpu_bind_type_t - cpdef enum mem_bind_type: + cdef enum mem_bind_type: MEM_BIND_VERBOSE MEM_BIND_NONE MEM_BIND_RANK @@ -1109,14 +1109,14 @@ cdef extern from "slurm/slurm.h": ctypedef mem_bind_type mem_bind_type_t - cpdef 
enum accel_bind_type: + cdef enum accel_bind_type: ACCEL_BIND_VERBOSE ACCEL_BIND_CLOSEST_GPU ACCEL_BIND_CLOSEST_NIC ctypedef accel_bind_type accel_bind_type_t - cpdef enum node_states: + cdef enum node_states: NODE_STATE_UNKNOWN NODE_STATE_DOWN NODE_STATE_IDLE @@ -1662,7 +1662,7 @@ cdef extern from "slurm/slurm.h": ctypedef srun_step_missing_msg srun_step_missing_msg_t - cpdef enum suspend_opts: + cdef enum suspend_opts: SUSPEND_JOB RESUME_JOB @@ -3140,7 +3140,7 @@ cdef extern from "slurm/slurmdb.h": SLURMDB_ADD_TRES SLURMDB_UPDATE_FEDS - cpdef enum cluster_fed_states: + cdef enum cluster_fed_states: CLUSTER_FED_STATE_NA CLUSTER_FED_STATE_ACTIVE CLUSTER_FED_STATE_INACTIVE @@ -3231,7 +3231,7 @@ cdef extern from "slurm/slurmdb.h": uint16_t with_coords uint16_t with_deleted - cpdef enum: + cdef enum: SLURMDB_ACCT_FLAG_NONE SLURMDB_ACCT_FLAG_DELETED @@ -3726,7 +3726,7 @@ cdef extern from "slurm/slurmdb.h": uint16_t with_wckeys uint16_t without_defaults - cpdef enum: + cdef enum: SLURMDB_USER_FLAG_NONE SLURMDB_USER_FLAG_DELETED @@ -3759,7 +3759,7 @@ cdef extern from "slurm/slurmdb.h": uint16_t with_usage uint16_t with_deleted - cpdef enum: + cdef enum: SLURMDB_WCKEY_FLAG_NONE SLURMDB_WCKEY_FLAG_DELETED @@ -3827,7 +3827,7 @@ cdef extern from "slurm/slurmdb.h": uint32_t count List tres_list - cpdef enum: + cdef enum: DBD_ROLLUP_HOUR DBD_ROLLUP_DAY DBD_ROLLUP_MONTH diff --git a/scripts/builddocs.sh b/scripts/builddocs.sh index 5e555bfd..cc5625c2 100755 --- a/scripts/builddocs.sh +++ b/scripts/builddocs.sh @@ -1,24 +1,5 @@ #!/bin/bash -#set -e -########################################### -# Build the docs and push to GitHub Pages # -########################################### - -# Build docs for all jobs within build -pip$PYTHON install Sphinx>=1.1 -make BUILDDIR=/root/docs -C /pyslurm/doc/ html - -# Only push to GitHub Pages once per build -if [[ "$PYTHON" == "2.7" && - "$CYTHON" == "0.27.3" && - "$SLURM" == "17.11.8" && - "$BRANCH" == "master" ]] -then - git clone 
https://github.com/pyslurm/pyslurm.github.io.git - rsync -av --delete --exclude=.git /root/docs/html/ /pyslurm.github.io/ - cd pyslurm.github.io - git add . - git -c user.name="Travis" -c user.email="Travis" commit -m 'Updated docs' - git push -q https://giovtorres:$GITHUB_TOKEN@github.com/pyslurm/pyslurm.github.io &2>/dev/null -fi +pip install -r doc_requirements.txt +pip install --no-build-isolation -e . +mkdocs build diff --git a/setup.cfg b/setup.cfg index af6883db..a7d6399b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,7 +1,3 @@ -[aliases] -doc=build_sphinx -docs=build_sphinx - [bdist_rpm] release = 1 packager = Giovanni Torres @@ -13,12 +9,6 @@ build_requires = python3-devel >= 3.6 requires = slurm use_bzip2 = 1 -[build_sphinx] -builder = man -source-dir = doc/source -build-dir = doc/build -all_files = 1 - [flake8] max-line-length = 88 extend-ignore = E203 diff --git a/setup.py b/setup.py index c36f279e..17e25bb5 100644 --- a/setup.py +++ b/setup.py @@ -157,7 +157,7 @@ def cleanup_build(): info("Removing build/") remove_tree("build", verbose=1) - files = find_files_with_extension("pyslurm", {".c", ".pyc"}) + files = find_files_with_extension("pyslurm", {".c", ".pyc", ".so"}) for file in files: if file.is_file(): From 3b8932539361a1e72445d2429026af10b9bd8738 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Wed, 15 Mar 2023 18:21:27 +0100 Subject: [PATCH 12/48] Migrate pyslurm.pyx from reST to google docstring style (#273) Also use type annotations to document the return type --- mkdocs.yml | 2 +- pyslurm/pyslurm.pyx | 1855 +++++++++++++++++++++---------------------- 2 files changed, 909 insertions(+), 948 deletions(-) diff --git a/mkdocs.yml b/mkdocs.yml index 8e18475a..56aa68c6 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -9,7 +9,7 @@ plugins: python: options: filters: ["!^_"] - docstring_style: sphinx + docstring_style: google show_signature: true show_root_heading: true extra: diff --git a/pyslurm/pyslurm.pyx 
b/pyslurm/pyslurm.pyx index 84869d05..a179c4fb 100644 --- a/pyslurm/pyslurm.pyx +++ b/pyslurm/pyslurm.pyx @@ -272,12 +272,11 @@ ctypedef struct config_key_pair_t: # -def get_controllers(): - """ - Get information about slurm controllers. +def get_controllers() -> tuple: + """Get information about slurm controllers. - :return: Name of primary controller, Name of backup controllers - :rtype: tuple + Returns: + Name of primary controller, Name of backup controllers """ cdef: slurm.slurm_conf_t *slurm_ctl_conf_ptr = NULL @@ -304,14 +303,14 @@ def get_controllers(): return control_machs -def is_controller(Host=None): - """ - Return slurm controller status for host. +def is_controller(Host=None) -> str: + """Return slurm controller status for host. - :param string Host: Name of host to check + Args: + Host (str): Name of host to check - :returns: None, primary or backup - :rtype: string + Returns: + None, "primary" or "backup" """ control_machs = get_controllers() if not Host: @@ -326,12 +325,11 @@ def is_controller(Host=None): return 'backup' -def slurm_api_version(): - """ - Return the slurm API version number. +def slurm_api_version() -> tuple: + """Return the slurm API version number. - :returns: version_major, version_minor, version_micro - :rtype: tuple + Returns: + A tuple of version_major, version_minor, version_micro """ cdef long version = slurm.SLURM_VERSION_NUMBER @@ -340,12 +338,11 @@ def slurm_api_version(): SLURM_VERSION_MICRO(version)) -def slurm_load_slurmd_status(): - """ - Issue RPC to get and load the status of Slurmd daemon. +def slurm_load_slurmd_status() -> str: + """Issue RPC to get and load the status of Slurmd daemon. - :returns: Slurmd information - :rtype: dict + Returns: + Slurmd information """ cdef: dict Status = {}, Status_dict = {} @@ -377,16 +374,15 @@ def slurm_load_slurmd_status(): return Status def slurm_init(conf_file=None): - """ + """Initialize the Slurm API internal structures. 
+ This function MUST be called before any internal API calls to ensure Slurm's internal configuration structures have been populated. - :param string conf_file: Absolute path to the configuration file - (optional). If None (default value), libslurm automatically locates its - own configuration. - - :returns: None - :rtype: None + Args: + conf_file (str, optional): Absolute path to the configuration file. If + None (default value), libslurm automatically locates its own + configuration. """ if conf_file: slurm.slurm_init(conf_file.encode('UTF-8')) @@ -394,22 +390,18 @@ def slurm_init(conf_file=None): slurm.slurm_init(NULL) def slurm_fini(): - """ - Call at process termination to cleanup internal configuration - structures. - - :returns: None - :rtype: None - """ + """Cleanup Slurm internal configuration structures.""" slurm.slurm_fini() # # Slurm Config Class # -def get_private_data_list(data): - """ - Return the list of enciphered Private Data configuration. +def get_private_data_list(data) -> list: + """Retrieve the enciphered Private Data configuration. + + Returns: + Private data """ result = [] @@ -426,9 +418,7 @@ def get_private_data_list(data): return result cdef class config: - """ - Class to access slurm config Information. - """ + """Slurm Config Information.""" cdef: slurm.slurm_conf_t *slurm_ctl_conf_ptr @@ -446,37 +436,35 @@ cdef class config: def __dealloc__(self): self.__free() - def lastUpdate(self): - """ - Get the time (epoch seconds) the retrieved data was updated. + def lastUpdate(self) -> int: + """Get the time (epoch seconds) the retrieved data was updated. - :returns: epoch seconds - :rtype: integer + Returns: + Epoch seconds """ return self._lastUpdate - def ids(self): - """ - Return the config IDs from retrieved data. + def ids(self) -> dict: + """Return the config IDs from retrieved data. 
- :returns: Dictionary of config key IDs - :rtype: dict + Returns: + Dictionary of config key IDs """ return self.__ConfigDict.keys() - def find_id(self, char *keyID=''): - """ - Retrieve config ID data. - :param str keyID: Config key string to search - :returns: Dictionary of values for given config key - :rtype: dict + def find_id(self, char *keyID='') -> dict: + """Retrieve config ID data. + + Args: + keyID (str): Config key string to search + + Returns: + Dictionary of values for given config key """ return self.__ConfigDict.get(keyID, {}) cdef void __free(self): - """ - Free memory allocated by slurm_load_ctl_conf. - """ + """Free memory allocated by slurm_load_ctl_conf.""" if self.__Config_ptr is not NULL: slurm.slurm_free_ctl_conf(self.__Config_ptr) self.__Config_ptr = NULL @@ -484,17 +472,14 @@ cdef class config: self.__lastUpdate = 0 def display_all(self): - """ - Print slurm control configuration information. - """ + """Print slurm control configuration information.""" slurm.slurm_print_ctl_conf(slurm.stdout, self.__Config_ptr) cdef int __load(self) except? -1: - """ - Load the slurm control configuration information. + """Load the slurm control configuration information. - :returns: slurm error code - :rtype: integer + Returns: + int: slurm error code """ cdef: slurm.slurm_conf_t *slurm_ctl_conf_ptr = NULL @@ -509,12 +494,11 @@ cdef class config: self.__Config_ptr = slurm_ctl_conf_ptr return errCode - def key_pairs(self): - """ - Return a dict of the slurm control data as key pairs. + def key_pairs(self) -> dict: + """Return a dict of the slurm control data as key pairs. - :returns: Dictionary of slurm key-pair values - :rtype: dict + Returns: + Dictionary of slurm key-pair values """ cdef: void *ret_list = NULL @@ -549,12 +533,11 @@ cdef class config: return keyDict - def get(self): - """ - Return the slurm control configuration information. + def get(self) -> dict: + """Return the slurm control configuration information. 
- :returns: Configuration data - :rtype: dict + Returns: + Configuration data """ self.__load() self.__get() @@ -562,12 +545,7 @@ cdef class config: return self.__ConfigDict cpdef dict __get(self): - """ - Get the slurm control configuration information. - - :returns: Configuration data - :rtype: dict - """ + """Get the slurm control configuration information.""" cdef: void *ret_list = NULL slurm.List config_list = NULL @@ -837,9 +815,7 @@ cdef class config: cdef class partition: - """ - Class to access/modify Slurm Partition Information. - """ + """Slurm Partition Information.""" cdef: slurm.partition_info_msg_t *_Partition_ptr @@ -855,21 +831,19 @@ cdef class partition: def __dealloc__(self): pass - def lastUpdate(self): - """ - Return time (epoch seconds) the partition data was updated. + def lastUpdate(self) -> int: + """Return time (epoch seconds) the partition data was updated. - :returns: epoch seconds - :rtype: integer + Returns: + Epoch seconds """ return self._lastUpdate - def ids(self): - """ - Return the partition IDs from retrieved data. + def ids(self) -> dict: + """Return the partition IDs from retrieved data. - :returns: Dictionary of partition IDs - :rtype: dict + Returns: + Dictionary of partition IDs """ cdef: int rc @@ -893,24 +867,26 @@ cdef class partition: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - def find_id(self, partID): - """ - Get partition information for a given partition. + def find_id(self, partID) -> dict: + """Get partition information for a given partition. + + Args: + partID (str): Partition key string to search - :param str partID: Partition key string to search - :returns: Dictionary of values for given partition - :rtype: dict + Returns: + Dictionary of values for given partition """ return self.get().get(partID) - def find(self, name='', val=''): - """ - Search for a property and associated value in the retrieved partition data. 
+ def find(self, name='', val='') -> list: + """Search for a property and associated value in the retrieved partition data. + + Args: + name (str): key string to search + val (str): value string to match - :param str name: key string to search - :param str value: value string to match - :returns: List of IDs that match - :rtype: list + Returns: + List of IDs that match """ cdef: list retList = [] @@ -925,10 +901,10 @@ cdef class partition: return retList def print_info_msg(self, int oneLiner=0): - """ - Display the partition information from previous load partition method. + """Display partition information from previous load partition method. - :param int oneLiner: Display on one line (default=0) + Args: + oneLiner (int, optional): Display on one line. """ cdef: int rc @@ -948,14 +924,14 @@ cdef class partition: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - def delete(self, PartID): - """ - Delete a give slurm partition. + def delete(self, PartID) -> int: + """Delete a give slurm partition. - :param string PartID: Name of slurm partition + Args: + PartID (str): Name of slurm partition - :returns: 0 for success else set the slurm error code as appropriately. - :rtype: integer + Returns: + 0 for success else set the slurm error code as appropriately. """ cdef: slurm.delete_part_msg_t part_msg @@ -973,12 +949,11 @@ cdef class partition: return errCode - def get(self): - """ - Get all slurm partition information + def get(self) -> dict: + """Get all slurm partition information - :returns: Dictionary of dictionaries whose key is the partition name. - :rtype: dict + Returns: + Dictionary of dictionaries whose key is the partition name. """ cdef: int rc @@ -1152,40 +1127,41 @@ cdef class partition: raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - def update(self, dict Partition_dict): - """ - Update a slurm partition. 
+ def update(self, dict Partition_dict) -> int: + """Update a slurm partition. + + Args: + Partition_dict (dict): A populated partition dictionary, an empty + one is created by create_partition_dict - :param dict partition_dict: A populated partition dictionary, - an empty one is created by create_partition_dict - :returns: 0 for success, -1 for error, and the slurm error code - is set appropriately. - :rtype: integer + Returns: + 0 for success, -1 for error, and the slurm error code is set + appropriately. """ cdef int errCode = slurm_update_partition(Partition_dict) return errCode - def create(self, dict Partition_dict): - """ - Create a slurm partition. + def create(self, dict Partition_dict) -> int: + """Create a slurm partition. + + Args: + Partition_dict (dict): A populated partition dictionary, an empty + one can be created by create_partition_dict - :param dict partition_dict: A populated partition dictionary, - an empty one can be created by create_partition_dict - :returns: 0 for success or -1 for error, and the slurm error - code is set appropriately. - :rtype: integer + Returns: + 0 for success or -1 for error, and the slurm error code is set + appropriately. """ cdef int errCode = slurm_create_partition(Partition_dict) return errCode -def create_partition_dict(): - """ - Returns a dictionary that can be populated by the user +def create_partition_dict() -> dict: + """Returns a dictionary that can be populated by the user and used for the update_partition and create_partition calls. - :returns: Empty reservation dictionary - :rtype: dict + Returns: + Empty reservation dictionary """ return { 'Alternate': None, @@ -1206,15 +1182,16 @@ def create_partition_dict(): } -def slurm_create_partition(dict partition_dict): - """ - Create a slurm partition. +def slurm_create_partition(dict partition_dict) -> int: + """Create a slurm partition. 
+ + Args: + partition_dict (dict): A populated partition dictionary, an empty one + can be created by create_partition_dict - :param dict partition_dict: A populated partition dictionary, - an empty one is created by create_partition_dict - :returns: 0 for success or -1 for error, and the slurm error - code is set appropriately. - :rtype: integer + Returns: + 0 for success or -1 for error, and the slurm error code is set + appropriately. """ cdef: slurm.update_part_msg_t part_msg_ptr @@ -1238,15 +1215,16 @@ def slurm_create_partition(dict partition_dict): return errCode -def slurm_update_partition(dict partition_dict): - """ - Update a slurm partition. +def slurm_update_partition(dict partition_dict) -> int: + """Update a slurm partition. + + Args: + partition_dict (dict): A populated partition dictionary, an empty one + is created by create_partition_dict - :param dict partition_dict: A populated partition dictionary, - an empty one is created by create_partition_dict - :returns: 0 for success, -1 for error, and the slurm error - code is set appropriately. - :rtype: integer + Returns: + 0 for success, -1 for error, and the slurm error code is set + appropriately. """ cdef: slurm.update_part_msg_t part_msg_ptr @@ -1304,13 +1282,14 @@ def slurm_update_partition(dict partition_dict): return errCode -def slurm_delete_partition(PartID): - """ - Delete a slurm partition. +def slurm_delete_partition(PartID) -> int: + """Delete a slurm partition. - :param string PartID: Name of slurm partition - :returns: 0 for success else set the slurm error code as appropriately. - :rtype: integer + Args: + PartID (str): Name of slurm partition + + Returns: + 0 for success else set the slurm error code as appropriately. """ cdef: slurm.delete_part_msg_t part_msg @@ -1334,12 +1313,14 @@ def slurm_delete_partition(PartID): cpdef int slurm_ping(int Controller=0) except? -1: - """ - Issue RPC to check if slurmctld is responsive. + """Issue RPC to check if slurmctld is responsive. 
- :param int Controller: 0 for primary (Default=0), 1 for backup, 2 for backup2, ... - :returns: 0 for success or slurm error code - :rtype: integer + Args: + Controller (int, optional): 0 for primary (Default=0), 1 for backup, 2 + for backup2, ... + + Returns: + 0 for success or slurm error code """ cdef int apiError = 0 cdef int errCode = slurm.slurm_ping(Controller) @@ -1352,11 +1333,10 @@ cpdef int slurm_ping(int Controller=0) except? -1: cpdef int slurm_reconfigure() except? -1: - """ - Issue RPC to have slurmctld reload its configuration file. + """Issue RPC to have slurmctld reload its configuration file. - :returns: 0 for success or a slurm error code - :rtype: integer + Returns: + 0 for success or slurm error code """ cdef int apiError = 0 cdef int errCode = slurm.slurm_reconfigure() @@ -1369,17 +1349,18 @@ cpdef int slurm_reconfigure() except? -1: cpdef int slurm_shutdown(uint16_t Options=0) except? -1: - """ - Issue RPC to have slurmctld cease operations. + """Issue RPC to have slurmctld cease operations. Both the primary and backup controller are shutdown. - :param int Options: - 0 - All slurm daemons (default) - 1 - slurmctld generates a core file - 2 - slurmctld is shutdown (no core file) - :returns: 0 for success or a slurm error code - :rtype: integer + Args: + Options (int, optional): + 0 - All slurm daemons (default) + 1 - slurmctld generates a core file + 2 - slurmctld is shutdown (no core file) + + Returns: + int: 0 for success or slurm error code """ cdef int apiError = 0 cdef int errCode = slurm.slurm_shutdown(Options) @@ -1392,13 +1373,12 @@ cpdef int slurm_shutdown(uint16_t Options=0) except? -1: cpdef int slurm_takeover(int backup_inx) except? -1: - """ - Issue a RPC to have slurmctld backup controller take over. + """Issue a RPC to have slurmctld backup controller take over. The backup controller takes over the primary controller. 
- :returns: 0 for success or a slurm error code - :rtype: integer + Returns: + int: 0 for success or slurm error code """ cdef int apiError = 0 cdef int errCode = slurm.slurm_takeover(backup_inx) @@ -1407,12 +1387,14 @@ cpdef int slurm_takeover(int backup_inx) except? -1: cpdef int slurm_set_debug_level(uint32_t DebugLevel=0) except? -1: - """ - Set the slurm controller debug level. + """Set the slurm controller debug level. + + Args: + DebugLevel (int, optional): The debug level. Possible values are from + 0 to 6. - :param int DebugLevel: 0 (default) to 6 - :returns: 0 for success, -1 for error and set slurm error number - :rtype: integer + Returns: + int: 0 for success, -1 for error and set slurm error number """ cdef int apiError = 0 cdef int errCode = slurm.slurm_set_debug_level(DebugLevel) @@ -1426,13 +1408,14 @@ cpdef int slurm_set_debug_level(uint32_t DebugLevel=0) except? -1: cpdef int slurm_set_debugflags(uint32_t debug_flags_plus=0, uint32_t debug_flags_minus=0) except? -1: - """ - Set the slurm controller debug flags. + """Set the slurm controller debug flags. - :param int debug_flags_plus: debug flags to be added - :param int debug_flags_minus: debug flags to be removed - :returns: 0 for success, -1 for error and set slurm error number - :rtype: integer + Args: + debug_flags_plus (int, optional): Debug flags to be added. + debug_flags_minus (int, optional): Debug flags to be removed. + + Returns: + int: 0 for success, -1 for error and set slurm error number """ cdef int apiError = 0 cdef int errCode = slurm.slurm_set_debugflags(debug_flags_plus, @@ -1446,12 +1429,13 @@ cpdef int slurm_set_debugflags(uint32_t debug_flags_plus=0, cpdef int slurm_set_schedlog_level(uint32_t Enable=0) except? -1: - """ - Set the slurm scheduler debug level. + """Set the slurm scheduler debug level. 
+ + Args: + Enable (int, optional): True = 0, False = 1 - :param int Enable: True = 0, False = 1 - :returns: 0 for success, -1 for error and set the slurm error number - :rtype: integer + Returns: + int: 0 for success, -1 for error and set the slurm error number """ cdef int apiError = 0 cdef int errCode = slurm.slurm_set_schedlog_level(Enable) @@ -1469,12 +1453,13 @@ cpdef int slurm_set_schedlog_level(uint32_t Enable=0) except? -1: cpdef int slurm_suspend(uint32_t JobID=0) except? -1: - """ - Suspend a running slurm job. + """Suspend a running slurm job. + + Args: + JobID (int): The job id. - :param int JobID: Job identifier - :returns: 0 for success or a slurm error code - :rtype: integer + Returns: + int: 0 for success or slurm error code """ cdef int apiError = 0 cdef int errCode = slurm.slurm_suspend(JobID) @@ -1487,12 +1472,13 @@ cpdef int slurm_suspend(uint32_t JobID=0) except? -1: cpdef int slurm_resume(uint32_t JobID=0) except? -1: - """ - Resume a running slurm job step. + """ Resume a running slurm job step. - :param int JobID: Job identifier - :returns: 0 for success or a slurm error code - :rtype: integer + Args: + JobID (int): The job id. + + Returns: + int: 0 for success or slurm error code """ cdef int apiError = 0 cdef int errCode = slurm.slurm_resume(JobID) @@ -1505,12 +1491,13 @@ cpdef int slurm_resume(uint32_t JobID=0) except? -1: cpdef int slurm_requeue(uint32_t JobID=0, uint32_t State=0) except? -1: - """ - Requeue a running slurm job step. + """Requeue a running slurm job step. - :param int JobID: Job identifier - :returns: 0 for success or a slurm error code - :rtype: integer + Args: + JobID (int): The job id. + + Returns: + int: 0 for success or slurm error code """ cdef int apiError = 0 cdef int errCode = slurm.slurm_requeue(JobID, State) @@ -1523,12 +1510,13 @@ cpdef int slurm_requeue(uint32_t JobID=0, uint32_t State=0) except? -1: cpdef long slurm_get_rem_time(uint32_t JobID=0) except? 
-1: - """ - Get the remaining time in seconds for a slurm job step. + """Get the remaining time in seconds for a slurm job step. + + Args: + JobID (int): The job id. - :param int JobID: Job identifier - :returns: Remaining time in seconds or -1 on error - :rtype: long + Returns: + int: Remaining time in seconds or -1 on error """ cdef int apiError = 0 cdef long errCode = slurm.slurm_get_rem_time(JobID) @@ -1541,12 +1529,13 @@ cpdef long slurm_get_rem_time(uint32_t JobID=0) except? -1: cpdef time_t slurm_get_end_time(uint32_t JobID=0) except? -1: - """ - Get the end time in seconds for a slurm job step. + """Get the end time in seconds for a slurm job step. + + Args: + JobID (int): The job id. - :param int JobID: Job identifier - :returns: Remaining time in seconds or -1 on error - :rtype: integer + Returns: + int: Remaining time in seconds or -1 on error """ cdef time_t EndTime = -1 cdef int apiError = 0 @@ -1560,12 +1549,13 @@ cpdef time_t slurm_get_end_time(uint32_t JobID=0) except? -1: cpdef int slurm_job_node_ready(uint32_t JobID=0) except? -1: - """ - Return if a node could run a slurm job now if dispatched. + """Return if a node could run a slurm job now if dispatched. + + Args: + JobID (int): The job id. - :param int JobID: Job identifier - :returns: Node Ready code - :rtype: integer + Returns: + int: Node ready code. """ cdef int apiError = 0 cdef int errCode = slurm.slurm_job_node_ready(JobID) @@ -1574,13 +1564,14 @@ cpdef int slurm_job_node_ready(uint32_t JobID=0) except? -1: cpdef int slurm_signal_job(uint32_t JobID=0, uint16_t Signal=0) except? -1: - """ - Send a signal to a slurm job step. + """Send a signal to a slurm job step. - :param int JobID: Job identifier - :param int Signal: Signal to send (default=0) - :returns: 0 for success or -1 for error and the set Slurm errno - :rtype: integer + Args: + JobID (int): The job id. + Signal (int, optional): Signal to send. 
+ + Returns: + int: 0 for success or -1 for error and the set Slurm errno """ cdef int apiError = 0 cdef int errCode = slurm.slurm_signal_job(JobID, Signal) @@ -1599,14 +1590,15 @@ cpdef int slurm_signal_job(uint32_t JobID=0, uint16_t Signal=0) except? -1: cpdef int slurm_signal_job_step(uint32_t JobID=0, uint32_t JobStep=0, uint16_t Signal=0) except? -1: - """ - Send a signal to a slurm job step. + """Send a signal to a slurm job step. - :param int JobID: Job identifier - :param int JobStep: Job step identifier - :param int Signal: Signal to send (default=0) - :returns: Error code - 0 for success or -1 for error and set the slurm errno - :rtype: integer + Args: + JobID (int): The job id. + JobStep: The id of the job step. + Signal (int, optional): Signal to send. + + Returns: + int: 0 for success or -1 for error and set the slurm errno. """ cdef int apiError = 0 cdef int errCode = slurm.slurm_signal_job_step(JobID, JobStep, Signal) @@ -1620,14 +1612,15 @@ cpdef int slurm_signal_job_step(uint32_t JobID=0, uint32_t JobStep=0, cpdef int slurm_kill_job(uint32_t JobID=0, uint16_t Signal=0, uint16_t BatchFlag=0) except? -1: - """ - Terminate a running slurm job step. + """Terminate a running slurm job step. + + Args: + JobID (int): The job id. + Signal (int): Signal to send. + BatchFlag (int, optional): Job batch flag. - :param int JobID: Job identifier - :param int Signal: Signal to send - :param int BatchFlag: Job batch flag (default=0) - :returns: 0 for success or -1 for error and set slurm errno - :rtype: integer + Returns: + int: 0 for success or -1 for error and set slurm errno """ cdef int apiError = 0 cdef int errCode = slurm.slurm_kill_job(JobID, Signal, BatchFlag) @@ -1641,14 +1634,15 @@ cpdef int slurm_kill_job(uint32_t JobID=0, uint16_t Signal=0, cpdef int slurm_kill_job_step(uint32_t JobID=0, uint32_t JobStep=0, uint16_t Signal=0) except? -1: - """ - Terminate a running slurm job step. + """Terminate a running slurm job step. 
+ + Args: + JobID (int): The job id. + JobStep (int): The id of the job step. + Signal (int, optional): Signal to send. - :param int JobID: Job identifier - :param int JobStep: Job step identifier - :param int Signal: Signal to send (default=0) - :returns: 0 for success or -1 for error, and the slurm error code is set appropriately. - :rtype: integer + Returns: + int: 0 for success or -1 for error, and slurm errno is set. """ cdef int apiError = 0 cdef int errCode = slurm.slurm_kill_job_step(JobID, JobStep, Signal) @@ -1662,15 +1656,16 @@ cpdef int slurm_kill_job_step(uint32_t JobID=0, uint32_t JobStep=0, cpdef int slurm_kill_job2(const char *JobID='', uint16_t Signal=0, uint16_t BatchFlag=0, char* sibling=NULL) except? -1: - """ - Terminate a running slurm job step. - - :param const char * JobID: Job identifier - :param int Signal: Signal to send - :param int BatchFlag: Job batch flag (default=0) - :param string sibling: optional string of sibling cluster to send the message to - :returns: 0 for success or -1 for error and set slurm errno - :rtype: integer + """Terminate a running slurm job step. + + Args: + JobID (str): The job id. + Signal (int): Signal to send. + BatchFlag (int, optional): Job batch flag. + sibling (str, optional): Sibling cluster to send the message to. + + Returns: + int: 0 for success or -1 for error, and slurm errno is set. """ cdef int apiError = 0 cdef int errCode = slurm.slurm_kill_job2(JobID, Signal, BatchFlag, sibling) @@ -1683,13 +1678,14 @@ cpdef int slurm_kill_job2(const char *JobID='', uint16_t Signal=0, cpdef int slurm_complete_job(uint32_t JobID=0, uint32_t JobCode=0) except? -1: - """ - Complete a running slurm job step. + """Complete a running slurm job step. - :param int JobID: Job identifier - :param int JobCode: Return code (default=0) - :returns: 0 for success or -1 for error and set slurm errno - :rtype: integer + Args: + JobID (int): The job id. + JobCode (int, optional): Return code for the job. 
+ + Returns: + int: 0 for success or -1 for error and set slurm errno """ cdef int apiError = 0 cdef int errCode = slurm.slurm_complete_job(JobID, JobCode) @@ -1702,14 +1698,14 @@ cpdef int slurm_complete_job(uint32_t JobID=0, uint32_t JobCode=0) except? -1: cpdef int slurm_notify_job(uint32_t JobID=0, char* Msg='') except? -1: - """ - Notify a message to a running slurm job step. + """Notify a message to a running slurm job step. - :param string JobID: Job identifier (default=0) - :param string Msg: Message string to send to job - :returns: 0 for success or -1 on error - :rtype: integer + Args: + JobID (int): The job id + Msg (str): Message to send to the job. + Returns: + int: 0 for success or -1 on error """ cdef int apiError = 0 cdef int errCode = slurm.slurm_notify_job(JobID, Msg) @@ -1722,14 +1718,14 @@ cpdef int slurm_notify_job(uint32_t JobID=0, char* Msg='') except? -1: cpdef int slurm_terminate_job_step(uint32_t JobID=0, uint32_t JobStep=0) except? -1: - """ - Terminate a running slurm job step. + """Terminate a running slurm job step. + + Args: + JobID (int): The job id + JobStep (int): The id of the job step - :param int JobID: Job identifier (default=0) - :param int JobStep: Job step identifier (default=0) - :returns: 0 for success or -1 for error, and the slurm error code - is set appropriately. - :rtype: integer + Returns: + 0 for success or -1 for error, and the slurm error code is set """ cdef int apiError = 0 cdef int errCode = slurm.slurm_terminate_job_step(JobID, JobStep) @@ -1746,9 +1742,7 @@ cpdef int slurm_terminate_job_step(uint32_t JobID=0, uint32_t JobStep=0) except? cdef class job: - """ - Class to access/modify Slurm Job Information. - """ + """Slurm Job Information.""" cdef: slurm.job_info_msg_t *_job_ptr @@ -1767,32 +1761,28 @@ cdef class job: def __dealloc__(self): pass - def lastUpdate(self): - """ - Get the time (epoch seconds) the job data was updated. 
+ def lastUpdate(self) -> int: + """Get the time (epoch seconds) the job data was updated. - :returns: epoch seconds - :rtype: integer + Returns: + Epoch seconds """ return self._lastUpdate - def lastBackfill(self): - """ - Get the time (epoch seconds) of last backfilling run. + def lastBackfill(self) -> int: + """Get the time (epoch seconds) of last backfilling run. - :returns: epoch seconds - :rtype: integer + Returns: + Epoch seconds """ return self._lastBackfill - cpdef ids(self): - """ - Return the job IDs from retrieved data. + def ids(self) -> dict: + """Return the job IDs from retrieved data. - :returns: Dictionary of job IDs - :rtype: dict + Returns: + Dictionary of job IDs """ - cdef: int rc int apiError @@ -1812,14 +1802,15 @@ cdef class job: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - def find(self, name='', val=''): - """ - Search for a property and associated value in the retrieved job data. + def find(self, name='', val='') -> list: + """Search for a property and associated value in the retrieved job data. - :param str name: key string to search - :param str value: value string to match - :returns: List of IDs that match - :rtype: list + Args: + name (str): key string to search + val (str): value string to match + + Returns: + List of IDs that match """ cdef: list retList = [] @@ -1835,8 +1826,7 @@ cdef class job: return retList cdef _load_single_job(self, jobid): - """ - Uses slurm_load_job to setup the self._job_ptr for a single job given by the jobid. + """Uses slurm_load_job to setup the self._job_ptr for a single job given by the jobid. After calling this, the job pointer can be used in other methods to operate on the informations of the job. @@ -1845,9 +1835,8 @@ cdef class job: Raises an value error if the jobid does not correspond to a existing job. - :param str jobid: The jobid - :returns: void - :rtype: None. + Args: + jobid (str): The job id. 
""" cdef: int apiError @@ -1868,32 +1857,34 @@ cdef class job: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - def find_id(self, jobid): - """ - Retrieve job ID data. - + def find_id(self, jobid) -> list: + """Retrieve job ID data. + This method accepts both string and integer formats of the jobid. This works for single jobs and job arrays. It uses the internal helper _load_single_job to do slurm_load_job. If the job corresponding to the jobid does not exist, a ValueError will be raised. - :param str jobid: Job id key string to search - :returns: List of dictionary of values for given job id - :rtype: list + Args: + jobid (str): Job id key string to search + + Returns: + List of dictionary of values for given job id """ self._load_single_job(jobid) return list(self.get_job_ptr().values()) - def find_user(self, user): - """ - Retrieve a user's job data. - + def find_user(self, user) -> dict: + """Retrieve a user's job data. + This method calls slurm_load_job_user to get all job_table records associated with a specific user. - :param str user: User string to search - :returns: Dictionary of values for all user's jobs - :rtype: dict + Args: + user (str): User string to search + + Returns: + Dictionary of values for all user's jobs """ cdef: int apiError @@ -1916,15 +1907,15 @@ cdef class job: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - cpdef get(self): - """ - Get all slurm jobs information. + def get(self) -> dict: + """Get all slurm jobs information. 
- This method calls slurm_load_jobs to get job_table records for all jobs + This method calls slurm_load_jobs to get job_table records for all + jobs - :returns: Data where key is the job name, each entry contains a - dictionary of job attributes - :rtype: dict + Returns: + Data where key is the job name, each entry contains a dictionary + of job attributes """ cdef: int apiError @@ -1939,11 +1930,10 @@ cdef class job: raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) cdef dict get_job_ptr(self): - """ - Convert all job arrays in buffer to dictionary. + """Convert all job arrays in buffer to dictionary. - :returns: dictionary of job attributes - :rtype: dict + Returns: + dict: dictionary of job attributes """ cdef: char time_str[32] @@ -2275,12 +2265,13 @@ cdef class job: return self._JobDict cpdef int __cpus_allocated_on_node_id(self, int nodeID=0): - """ - Get the number of cpus allocated to a job on a node by node name. + """Get the number of cpus allocated to a job on a node by node name. - :param int nodeID: Numerical node ID - :returns: Num of CPUs allocated to job on this node or -1 on error - :rtype: integer + Args: + nodeID (int): Numerical node ID + + Returns: + int: Num of CPUs allocated to job on this node or -1 on error """ cdef: slurm.job_resources_t *job_resrcs_ptr = self._record.job_resrcs @@ -2289,12 +2280,13 @@ cdef class job: return retval cdef int __cpus_allocated_on_node(self, char* nodeName=''): - """ - Get the number of cpus allocated to a slurm job on a node by node name. + """Get the number of cpus allocated to a slurm job on a node by node name. 
+ + Args: + nodeName (str): Name of the node - :param string nodeName: Name of node - :returns: Num of CPUs allocated to job on this node or -1 on error - :rtype: integer + Returns: + Num of CPUs allocated to job on this node or -1 on error """ cdef: slurm.job_resources_t *job_resrcs_ptr = self._record.job_resrcs @@ -2303,12 +2295,13 @@ cdef class job: return retval cdef list __cpus_allocated_list_on_node(self, char* nodeName=''): - """ - Get a list of cpu ids allocated to current slurm job on a node by node name. + """Get a list of cpu ids allocated to current slurm job on a node by node name. + + Args: + nodeName (str): Name of the node - :param string nodeName: Name of node - :returns: list of allocated cpus (empty, if nothing found or error) - :rtype: list + Returns: + list of allocated cpus (empty, if nothing found or error) """ cdef: int error = 0 @@ -2328,13 +2321,14 @@ cdef class job: return cpus_list - def __unrange(self, bit_str): - """ - converts a string describing a bitmap (from slurm_job_cpus_allocated_str_on_node()) to a list. + def __unrange(self, bit_str) -> list: + """converts a string describing a bitmap (from slurm_job_cpus_allocated_str_on_node()) to a list. + + Args: + bit_str (str): string describing a bitmap (e.g. "0-30,45,50-60") - :param string bit_str: string describing a bitmap (e.g. "0-30,45,50-60") - :returns: list referring to bitmap (empty if not succesful) - :rtype: list + Returns: + List referring to bitmap (empty if not succesful) """ r_list = [] @@ -2352,19 +2346,16 @@ cdef class job: return r_list cpdef __free(self): - """ - Release the storage generated by the slurm_get_job_steps function. - """ + """Release storage generated by the slurm_get_job_steps function.""" if self._job_ptr is not NULL: slurm.slurm_free_job_info_msg(self._job_ptr) - cpdef print_job_info_msg(self, int oneLiner=0): - """ - Print the data structure describing all job step records. - - The job step records are loaded by the slurm_get_job_steps function. 
+ def print_job_info_msg(self, int oneLiner=0): + """Print the data structure describing all job step records. - :param int Flag: Default=0 + Args: + oneLiner (int, optional): Whether to print the data in one line or + not """ cdef: int rc @@ -2381,19 +2372,17 @@ cdef class job: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - def slurm_job_batch_script(self, jobid): - """ - Return the contents of the batch-script for a Job. + def slurm_job_batch_script(self, jobid) -> str: + """Return the contents of the batch-script for a Job. + + The string returned also includes all the "\\n" characters (new-line). - Note: The string returned also includes all the "\\n" characters - (new-line). + Args: + jobid (Union[str, int]): ID of the Job for which the script should + be retrieved. - :param jobid: ID of the Job for which the script should be retrieved. - :type jobid: Union[str, int] - :raises: [ValueError]: When retrieving the Batch-Script for the Job was - not successful. - :returns: The content of the batch script. - :rtype: str + Returns: + The content of the batch script. """ # This reimplements the slurm_job_batch_script API call. Otherwise we # would have to parse the FILE* ptr we get from it back into a @@ -2863,10 +2852,17 @@ cdef class job: req.wait_all_nodes = slurm.NO_VAL return rc - def submit_batch_job(self, job_opts): - """ - Submit batch job. - * make sure options match sbatch command line opts and not struct member names. + def submit_batch_job(self, job_opts) -> int: + """Submit batch job. + + Make sure options match sbatch command line opts and not struct member + names. + + Args: + job_opts (dict): Job information. + + Returns: + The job id of the submitted job. """ cdef: slurm.job_desc_msg_t desc @@ -3025,15 +3021,18 @@ cdef class job: #return "Submitted batch job %s" % job_id return job_id - def wait_finished(self, jobid): - """ - Block until the job given by the jobid finishes. 
+ def wait_finished(self, jobid) -> int: + """Block until the job given by the jobid finishes. + This works for single jobs, as well as job arrays. - :param jobid: The job id of the slurm job. - To reference a job with job array set, use the first/"master" jobid - (the same as given by squeue) - :returns: The exit code of the slurm job. - :rtype: int + + Args: + jobid (int): The job id of the slurm job. + To reference a job with job array set, use the first/"master" + jobid (the same as given by squeue) + + Returns: + The exit code of the slurm job. """ exit_status = -9999 complete = False @@ -3059,14 +3058,13 @@ cdef class job: def slurm_pid2jobid(uint32_t JobPID=0): - """ - Get the slurm job id from a process id. + """Get the slurm job id from a process id. + + Args: + JobPID (int): Job process id - :param int JobPID: Job process id - :returns: 0 for success or a slurm error code - :rtype: integer - :returns: Job Identifier - :rtype: integer + Returns: + int: 0 for success or a slurm error code """ cdef: uint32_t JobID = 0 @@ -3081,15 +3079,16 @@ def slurm_pid2jobid(uint32_t JobPID=0): cdef secs2time_str(uint32_t time): - """ - Convert seconds to Slurm string format. + """Convert seconds to Slurm string format. This method converts time in seconds (86400) to Slurm's string format (1-00:00:00). - :param int time: time in seconds - :returns: time string - :rtype: str + Args: + time (int): Time in seconds + + Returns: + str: Slurm time string. """ cdef: char *time_str @@ -3113,15 +3112,16 @@ cdef secs2time_str(uint32_t time): cdef mins2time_str(uint32_t time): - """ - Convert minutes to Slurm string format. + """Convert minutes to Slurm string format. This method converts time in minutes (14400) to Slurm's string format (10-00:00:00). - :param int time: time in minutes - :returns: time string - :rtype: str + Args: + time (int): Time in minutes + + Returns: + str: Slurm time string. 
""" cdef: double days, hours, minutes, seconds @@ -3162,25 +3162,25 @@ class SlurmError(Exception): # -def slurm_get_errno(): - """ - Return the slurm error as set by a slurm API call. +def slurm_get_errno() -> int: + """Return the slurm error as set by a slurm API call. - :returns: slurm error number - :rtype: integer + Returns: + Current slurm error number """ cdef int errNum = slurm.slurm_get_errno() return errNum -def slurm_strerror(int Errno=0): - """ - Return slurm error message represented by a given slurm error number. +def slurm_strerror(int Errno=0) -> str: + """Return slurm error message represented by a given slurm error number. - :param int Errno: slurm error number. - :returns: slurm error string - :rtype: string + Args: + Errno (int): slurm error number. + + Returns: + slurm error string """ cdef char* errMsg = slurm.slurm_strerror(Errno) @@ -3188,22 +3188,22 @@ def slurm_strerror(int Errno=0): def slurm_seterrno(int Errno=0): - """ - Set the slurm error number. + """Set the slurm error number. - :param int Errno: slurm error number + Args: + Errno (int): slurm error number """ slurm.slurm_seterrno(Errno) def slurm_perror(char* Msg=''): - """ - Print to standard error the supplied header. - + """Print to standard error the supplied header. + Header is followed by a colon, followed by a text description of the last Slurm error code generated. - :param string Msg: slurm program error String + Args: + Msg (str): slurm program error String """ slurm.slurm_perror(Msg) @@ -3215,9 +3215,7 @@ def slurm_perror(char* Msg=''): cdef class node: - """ - Class to access/modify/update Slurm Node Information. - """ + """Access/Modify/Update Slurm Node Information.""" cdef: slurm.node_info_msg_t *_Node_ptr @@ -3235,21 +3233,19 @@ cdef class node: def __dealloc__(self): pass - def lastUpdate(self): - """ - Return last time (epoch seconds) the node data was updated. + def lastUpdate(self) -> int: + """Return last time (epoch seconds) the node data was updated. 
- :returns: epoch seconds - :rtype: integer + Returns: + Epoch seconds """ return self._lastUpdate - cpdef ids(self): - """ - Return the node IDs from retrieved data. + def ids(self) -> dict: + """Return the node IDs from retrieved data. - :returns: Dictionary of node IDs - :rtype: dict + Returns: + Dictionary of node IDs """ cdef: int rc @@ -3270,22 +3266,22 @@ cdef class node: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - def find_id(self, nodeID): - """ - Get node information for a given node. + def find_id(self, nodeID) -> dict: + """Get node information for a given node. + + Args: + nodeID (str): Node key string to search - :param str nodeID: Node key string to search - :returns: Dictionary of values for given node - :rtype: dict + Returns: + Dictionary of values for given node """ return list(self.get_node(nodeID).values())[0] - def get(self): - """ - Get all slurm node information. + def get(self) -> dict: + """Get all slurm node information. - :returns: Dictionary of dictionaries whose key is the node name. - :rtype: dict + Returns: + Dictionary of dictionaries whose key is the node name. """ return self.get_node(None) @@ -3293,13 +3289,14 @@ cdef class node: if gres_str: return re.split(r',(?![^(]*\))', gres_str) - def get_node(self, nodeID): - """ - Get single slurm node information. + def get_node(self, nodeID) -> dict: + """Get single slurm node information. + + Args: + nodeID (str): Node key string to search. Default NULL. - :param str nodeID: Node key string to search. Default NULL. - :returns: Dictionary of give node info data. - :rtype: dict + Returns: + Dictionary of node info data. """ cdef: int rc @@ -3522,23 +3519,24 @@ cdef class node: return self._NodeDict - cpdef update(self, dict node_dict): - """ - Update slurm node information. + def update(self, dict node_dict) -> int: + """Update slurm node information. 
- :param dict node_dict: A populated node dictionary, an empty one is - created by create_node_dict - :returns: 0 for success or -1 for error, and the slurm error code - is set appropriately. - :rtype: integer + Args: + node_dict (dict): A populated node dictionary, an empty one is + created by create_node_dict + + Returns: + 0 for success or -1 for error, and the slurm error code is set + appropriately. """ return slurm_update_node(node_dict) - cpdef print_node_info_msg(self, int oneLiner=False): - """ - Output information about all slurm nodes. + def print_node_info_msg(self, int oneLiner=False): + """Output information about all slurm nodes. - :param int oneLiner: Print on one line - False (Default) or True + Args: + oneLiner (int, optional): Print on one line """ cdef: int rc @@ -3556,15 +3554,16 @@ cdef class node: raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) -def slurm_update_node(dict node_dict): - """ - Update slurm node information. +def slurm_update_node(dict node_dict) -> int: + """Update slurm node information. + + Args: + node_dict (dict): A populated node dictionary, an empty one is created + by create_node_dict - :param dict node_dict: A populated node dictionary, an empty one is - created by create_node_dict - :returns: 0 for success or -1 for error, and the slurm error code - is set appropriately. - :rtype: integer + Returns: + 0 for success or -1 for error, and the slurm error code is set + appropriately. """ cdef: slurm.update_node_msg_t node_msg @@ -3609,15 +3608,14 @@ def slurm_update_node(dict node_dict): return errCode -def create_node_dict(): - """ - Return a an update_node dictionary - +def create_node_dict() -> dict: + """Return a an update_node dictionary + This dictionary can be populated by the user and used for the update_node call. 
- :returns: Empty node dictionary - :rtype: dict + Returns: + Empty node dictionary """ return { 'node_names': None, @@ -3635,9 +3633,7 @@ def create_node_dict(): cdef class jobstep: - """ - Class to access/modify Slurm Jobstep Information. - """ + """Access/Modify Slurm Jobstep Information.""" cdef: slurm.time_t _lastUpdate @@ -3656,19 +3652,16 @@ cdef class jobstep: self.__destroy() cpdef __destroy(self): - """ - Free the slurm job memory allocated by load jobstep method. - """ + """Free the slurm job memory allocated by load jobstep method.""" self._lastUpdate = 0 self._ShowFlags = 0 self._JobStepDict = {} - def lastUpdate(self): - """ - Get the time (epoch seconds) the jobstep data was updated. + def lastUpdate(self) -> int: + """Get the time (epoch seconds) the jobstep data was updated. - :returns: epoch seconds - :rtype: integer + Returns: + Epoch seconds """ return self._lastUpdate @@ -3696,30 +3689,22 @@ cdef class jobstep: return retDict - cpdef get(self): - """ - Get slurm jobstep information. + def get(self) -> dict: + """Get slurm jobstep information. - :returns: Data whose key is the jobstep ID. - :rtype: dict + Returns: + Data whose key is the jobstep ID. """ self.__get() return self._JobStepDict cpdef __get(self): - """ - Load details about job steps. + """Load details about job steps. This method loads details about job steps that satisfy the job_id and/or step_id specifications provided if the data has been updated since the update_time specified. - - :param int JobID: Job Identifier - :param int StepID: Jobstep Identifier - :param int ShowFlags: Display flags (Default=0) - :returns: Data whose key is the job and step ID - :rtype: dict """ cdef: slurm.job_step_info_response_msg_t *job_step_info_ptr = NULL @@ -3836,14 +3821,15 @@ cdef class jobstep: self._JobStepDict = Steps - cpdef layout(self, uint32_t JobID=0, uint32_t StepID=0): - """ - Get the slurm job step layout from a given job and step id. 
+ def layout(self, uint32_t JobID=0, uint32_t StepID=0) -> list: + """Get the slurm job step layout from a given job and step id. - :param int JobID: slurm job id (Default=0) - :param int StepID: slurm step id (Default=0) - :returns: List of job step layout. - :rtype: list + Args: + JobID (int): The job id. + StepID (int): The id of the job step. + + Returns: + List of job step layout. """ cdef: slurm.slurm_step_id_t step_id @@ -3897,9 +3883,7 @@ cdef class jobstep: cdef class hostlist: - """ - Wrapper class for Slurm hostlist functions. - """ + """Wrapper for Slurm hostlist functions.""" cdef slurm.hostlist_t hl @@ -3928,15 +3912,14 @@ cdef class hostlist: def count(self): return slurm.slurm_hostlist_count(self.hl) - cpdef get_list(self): - """ - Get the list of hostnames composing the hostlist. + def get_list(self) -> list: + """Get the list of hostnames composing the hostlist. - For example with a hostlist created with "tux[1-3]" -> [ 'tux1', tux2', - 'tux3' ]. + For example with a hostlist created with "tux[1-3]" -> [ 'tux1', + tux2', 'tux3' ]. - :returns: the list of hostnames in case of success or None on error. - :rtype: list + Returns: + The list of hostnames in case of success or None on error. """ cdef: slurm.hostlist_t hlist = NULL @@ -4021,13 +4004,15 @@ cdef class hostlist: cdef class trigger: - def set(self, dict trigger_dict): - """ - Set or create a slurm trigger. + def set(self, dict trigger_dict) -> int: + """Set or create a slurm trigger. - :param dict trigger_dict: A populated dictionary of trigger information - :returns: 0 for success or -1 for error, and the slurm error code is set appropriately. - :rtype: integer + Args: + trigger_dict (dict): A populated dictionary of trigger information + + Returns: + 0 for success or -1 for error, and the slurm error code is set + appropriately. """ cdef: slurm.trigger_info_t trigger_set @@ -4098,12 +4083,11 @@ cdef class trigger: return 0 - def get(self): - """ - Get the information on slurm triggers. 
+ def get(self) -> dict: + """Get the information on slurm triggers. - :returns: Where key is the trigger ID - :rtype: dict + Returns: + Dictionary, where keys are the trigger IDs """ cdef: slurm.trigger_info_msg_t *trigger_get = NULL @@ -4130,15 +4114,16 @@ cdef class trigger: return Triggers - def clear(self, TriggerID=0, UserID=slurm.NO_VAL, ID=0): - """ - Clear or remove a slurm trigger. + def clear(self, TriggerID=0, UserID=slurm.NO_VAL, ID=0) -> int: + """Clear or remove a slurm trigger. + + Args: + TriggerID (str): Trigger Identifier + UserID (str): User Identifier + ID (str): Job Identifier - :param string TriggerID: Trigger Identifier - :param string UserID: User Identifier - :param string ID: Job Identifier - :returns: 0 for success or a slurm error code - :rtype: integer + Returns: + 0 for success or a slurm error code """ cdef: slurm.trigger_info_t trigger_clear @@ -4169,9 +4154,7 @@ cdef class trigger: cdef class reservation: - """ - Class to access/update/delete slurm reservation Information. - """ + """Access/update/delete slurm reservation Information.""" cdef: slurm.reserve_info_msg_t *_Res_ptr @@ -4188,42 +4171,42 @@ cdef class reservation: def __dealloc__(self): self.__free() - def lastUpdate(self): - """ - Get the time (epoch seconds) the reservation data was updated. + def lastUpdate(self) -> int: + """Get the time (epoch seconds) the reservation data was updated. - :returns: epoch seconds - :rtype: integer + Returns: + epoch seconds """ return self._lastUpdate - def ids(self): - """ - Return a list of reservation IDs from retrieved data. + def ids(self) -> dict: + """Return a list of reservation IDs from retrieved data. - :returns: Dictionary of reservation IDs - :rtype: dict + Returns: + Dictionary of reservation IDs """ return self._ResDict.keys() - def find_id(self, resID): - """ - Retrieve reservation ID data. + def find_id(self, resID) -> dict: + """Retrieve reservation ID data. 
+ + Args: + resID (str): Reservation key string to search - :param str resID: Reservation key string to search - :returns: Dictionary of values for given reservation key - :rtype: dict + Returns: + Dictionary of values for given reservation key """ return self._ResDict.get(resID, {}) - def find(self, name='', val=''): - """ - Search for property and associated value in reservation data. + def find(self, name='', val='') -> list: + """Search for property and associated value in reservation data. + + Args: + name (str): key string to search + val (str): value string to match - :param str name: key string to search - :param str value: value string to match - :returns: List of IDs that match - :rtype: list + Returns: + List of IDs that match """ # [ key for key, value in self._ResDict.items() if self._ResDict[key]['state'] == 'error'] @@ -4239,10 +4222,7 @@ cdef class reservation: self.__load() cdef int __load(self) except? -1: - """ - Load slurm reservation information. - """ - + """Load slurm reservation information.""" cdef: slurm.reserve_info_msg_t *new_reserve_info_ptr = NULL slurm.time_t last_time = NULL @@ -4271,19 +4251,16 @@ cdef class reservation: return errCode cdef __free(self): - """ - Free slurm reservation pointer. - """ + """Free slurm reservation pointer.""" if self._Res_ptr is not NULL: slurm.slurm_free_reservation_info_msg(self._Res_ptr) - def get(self): - """ - Get slurm reservation information. + def get(self) -> dict: + """Get slurm reservation information. - :returns: Data whose key is the Reservation ID - :rtype: dict + Returns: + Data whose key is the Reservation ID """ self.load() self.__get() @@ -4323,35 +4300,44 @@ cdef class reservation: self._ResDict = Reservations - def create(self, dict reservation_dict={}): - """ - Create slurm reservation. + def create(self, dict reservation_dict={}) -> int: + """Create slurm reservation. 
+ + Args: + reservation_dict (dict): Reservation information + + Returns: + 0 for success or a slurm error code """ return slurm_create_reservation(reservation_dict) - def delete(self, ResID): - """ - Delete slurm reservation. + def delete(self, ResID) -> int: + """Delete slurm reservation. - :returns: 0 for success or a slurm error code - :rtype: integer + Args: + ResID (int): ID of the reservation to delete + + Returns: + 0 for success or a slurm error code """ return slurm_delete_reservation(ResID) - def update(self, dict reservation_dict={}): - """ - Update a slurm reservation attributes. + def update(self, dict reservation_dict={}) -> int: + """Update a slurm reservation attributes. - :returns: 0 for success or -1 for error, and the slurm error code is set appropriately. - :rtype: integer + Args: + reservation_dict (dict): Reservation information + + Returns: + 0 for success or -1 for error and slurm error code is set """ return slurm_update_reservation(reservation_dict) def print_reservation_info_msg(self, int oneLiner=0): - """ - Output information about all slurm reservations. + """Output information about all slurm reservations. - :param int Flags: Print on one line - 0 (Default) or 1 + Args: + oneLiner (int, optional): Print reservation info in one-line """ if self._Res_ptr is not NULL: slurm.slurm_print_reservation_info_msg(slurm.stdout, self._Res_ptr, oneLiner) @@ -4362,15 +4348,15 @@ cdef class reservation: # -def slurm_create_reservation(dict reservation_dict={}): - """ - Create a slurm reservation. +def slurm_create_reservation(dict reservation_dict={}) -> str: + """Create a slurm reservation. - :param dict reservation_dict: A populated reservation dictionary, - an empty one is created by create_reservation_dict - :returns: 0 for success or -1 for error, and the slurm error code - is set appropriately. 
- :rtype: string + Args: + reservation_dict (dict): A populated reservation dictionary, an empty + one is created by create_reservation_dict + + Returns: + The name of the reservation created. """ cdef: slurm.resv_desc_msg_t resv_msg @@ -4463,15 +4449,16 @@ def slurm_create_reservation(dict reservation_dict={}): return resID -def slurm_update_reservation(dict reservation_dict={}): - """ - Update a slurm reservation. +def slurm_update_reservation(dict reservation_dict={}) -> int: + """Update a slurm reservation. + + Args: + reservation_dict (dict): A populated reservation dictionary, an empty + one is created by create_reservation_dict - :param dict reservation_dict: A populated reservation dictionary, - an empty one is created by create_reservation_dict - :returns: 0 for success or -1 for error, and the slurm error code - is set appropriately. - :rtype: integer + Returns: + 0 for success or -1 for error, and the slurm error code is set + appropriately. """ cdef: slurm.resv_desc_msg_t resv_msg @@ -4558,13 +4545,15 @@ def slurm_update_reservation(dict reservation_dict={}): return errCode -def slurm_delete_reservation(ResID): - """ - Delete a slurm reservation. +def slurm_delete_reservation(ResID) -> int: + """Delete a slurm reservation. + + Args: + ResID (str): Reservation Identifier - :param string ResID: Reservation Identifier - :returns: 0 for success or -1 for error, and the slurm error code is set appropriately. - :rtype: integer + Returns: + 0 for success or -1 for error, and the slurm error code is set + appropriately. """ cdef slurm.reservation_name_msg_t resv_msg @@ -4584,15 +4573,14 @@ def slurm_delete_reservation(ResID): return errCode -def create_reservation_dict(): - """ - Create and empty dict for use with create_reservation method. - +def create_reservation_dict() -> dict: + """Create and empty dict for use with create_reservation method. 
+ Returns a dictionary that can be populated by the user an used for the update_reservation and create_reservation calls. - :returns: Empty Reservation dictionary - :rtype: dict + Returns: + Empty Reservation dictionary """ return { 'start_time': 0, @@ -4616,10 +4604,7 @@ def create_reservation_dict(): cdef class topology: - """ - Class to access/update slurm topology information. - """ - + """Access/update slurm topology information.""" cdef: slurm.topo_info_response_msg_t *_topo_info_ptr dict _TopoDict @@ -4631,32 +4616,25 @@ cdef class topology: def __dealloc__(self): self.__free() - def lastUpdate(self): - """ - Get the time (epoch seconds) the retrieved data was updated. + def lastUpdate(self) -> int: + """Get the time (epoch seconds) the retrieved data was updated. - :returns: epoch seconds - :rtype: integer + Returns: + Epoch seconds """ return self._lastUpdate cpdef __free(self): - """ - Free the memory returned by load method. - """ + """Free the memory returned by load method.""" if self._topo_info_ptr is not NULL: slurm.slurm_free_topo_info_msg(self._topo_info_ptr) def load(self): - """ - Load slurm topology information. - """ + """Load slurm topology information.""" self.__load() cpdef int __load(self) except? -1: - """ - Load slurm topology. - """ + """Load slurm topology.""" cdef int apiError = 0 cdef int errCode = 0 @@ -4671,12 +4649,11 @@ cdef class topology: return errCode - def get(self): - """ - Get slurm topology information. + def get(self) -> dict: + """Get slurm topology information. - :returns: Dictionary whose key is the Topology ID - :rtype: dict + Returns: + Dictionary whose key is the Topology ID """ self.__load() self.__get() @@ -4706,16 +4683,12 @@ cdef class topology: self._TopoDict = Topo def display(self): - """ - Display topology information to standard output. 
- """ + """Display topology information to standard output.""" self._print_topo_info_msg() cpdef _print_topo_info_msg(self): - """ - Output information about topology based upon message as loaded using slurm_load_topo. - - :param int Flags: Print on one line - False (Default), True + """Output information about topology based upon message as loaded + using slurm_load_topo. """ if self._topo_info_ptr is not NULL: @@ -4730,6 +4703,7 @@ cdef class topology: cdef class statistics: + """Slurm Controller statistics.""" cdef: slurm.stats_info_request_msg_t _req @@ -4743,11 +4717,11 @@ cdef class statistics: def __dealloc__(self): pass - cpdef dict get(self): - """ - Get slurm statistics information. + def get(self) -> dict: + """Get slurm statistics information. - :rtype: dict + Returns: + Slurm Controller statistics """ cdef: int errCode @@ -4843,7 +4817,7 @@ cdef class statistics: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - cpdef int reset(self): + def reset(self) -> int: """ Reset scheduling statistics @@ -5122,9 +5096,7 @@ cdef class statistics: cdef class front_end: - """ - Class to access/update slurm front end node information. - """ + """Access/update slurm front end node information.""" cdef: slurm.time_t Time @@ -5144,22 +5116,16 @@ cdef class front_end: self.__destroy() cpdef __destroy(self): - """ - Free the memory allocated by load front end node method. - """ + """Free the memory allocated by load front end node method.""" if self._FrontEndNode_ptr is not NULL: slurm.slurm_free_front_end_info_msg(self._FrontEndNode_ptr) def load(self): - """ - Load slurm front end node information. - """ + """Load slurm front end node information.""" self.__load() cdef int __load(self) except? -1: - """ - Load slurm front end node. 
- """ + """Load slurm front end node.""" cdef: # slurm.front_end_info_msg_t *new_FrontEndNode_ptr = NULL time_t last_time = NULL @@ -5179,30 +5145,27 @@ cdef class front_end: return errCode - def lastUpdate(self): - """ - Return last time (sepoch seconds) the node data was updated. + def lastUpdate(self) -> int: + """Return last time (sepoch seconds) the node data was updated. - :returns: epoch seconds - :rtype: integer + Returns: + Epoch seconds """ return self._lastUpdate - def ids(self): - """ - Return the node IDs from retrieved data. + def ids(self) -> dict: + """Return the node IDs from retrieved data. - :returns: Dictionary of node IDs - :rtype: dict + Returns: + Dictionary of node IDs """ return list(self._FrontEndDict.keys()) - def get(self): - """ - Get front end node information. + def get(self) -> dict: + """Get front end node information. - :returns: Dictionary whose key is the Topology ID - :rtype: dict + Returns: + Dictionary whose key is the Topology ID """ self.__load() self.__get() @@ -5245,9 +5208,7 @@ cdef class front_end: cdef class qos: - """ - Class to access/update slurm QOS information. - """ + """Access/update slurm QOS information.""" cdef: void *dbconn @@ -5262,22 +5223,15 @@ cdef class qos: self.__destroy() cdef __destroy(self): - """ - QOS Destructor method. - """ + """QOS Destructor method.""" self._QOSDict = {} def load(self): - """ - Load slurm QOS information. - """ - + """Load slurm QOS information.""" self.__load() cdef int __load(self) except? -1: - """ - Load slurm QOS list. - """ + """Load slurm QOS list.""" cdef: slurm.slurmdb_qos_cond_t *new_qos_cond = NULL int apiError = 0 @@ -5294,29 +5248,26 @@ cdef class qos: return 0 def lastUpdate(self): - """ - Return last time (sepoch seconds) the QOS data was updated. + """Return last time (sepoch seconds) the QOS data was updated. 
- :returns: epoch seconds - :rtype: integer + Returns: + int: epoch seconds """ return self._lastUpdate - def ids(self): - """ - Return the QOS IDs from retrieved data. + def ids(self) -> dict: + """Return the QOS IDs from retrieved data. - :returns: Dictionary of QOS IDs - :rtype: dict + Returns: + Dictionary of QOS IDs """ return self._QOSDict.keys() - def get(self): - """ - Get slurm QOS information. + def get(self) -> dict: + """Get slurm QOS information. - :returns: Dictionary whose key is the QOS ID - :rtype: dict + Returns: + Dictionary whose key is the QOS ID """ self.__load() self.__get() @@ -5396,10 +5347,7 @@ cdef class qos: # slurmdbd jobs Class # cdef class slurmdb_jobs: - """ - Class to access Slurmdbd Jobs information. - """ - + """Access Slurmdbd Jobs information.""" cdef: void* db_conn slurm.slurmdb_job_cond_t *job_cond @@ -5412,26 +5360,35 @@ cdef class slurmdb_jobs: slurm.xfree(self.job_cond) slurm.slurmdb_connection_close(&self.db_conn) - def get(self, jobids=[], userids=[], starttime=0, endtime=0, flags = None, db_flags = None, clusters = []): - """ - Get Slurmdb information about some jobs. - + def get(self, jobids=[], userids=[], starttime=0, endtime=0, flags = None, + db_flags = None, clusters = []) -> dict: + """Get Slurmdb information about some jobs. + Input formats for start and end times: - * today or tomorrow - * midnight, noon, teatime (4PM) - * HH:MM [AM|PM] - * MMDDYY or MM/DD/YY or MM.DD.YY - * YYYY-MM-DD[THH[:MM[:SS]]] - * now + count [minutes | hours | days | weeks] - * + * today or tomorrow + * midnight, noon, teatime (4PM) + * HH:MM [AM|PM] + * MMDDYY or MM/DD/YY or MM.DD.YY + * YYYY-MM-DD[THH[:MM[:SS]]] + * now + count [minutes | hours | days | weeks] + * + Invalid time input results in message to stderr and return value of zero. - :param jobids: Ids of the jobs to search. Defaults to all jobs. 
- :param starttime: Select jobs eligible after this timestamp - :param endtime: Select jobs eligible before this timestamp - :returns: Dictionary whose key is the JOBS ID - :rtype: dict + Args: + jobids (list): Ids of the jobs to search. Defaults to all jobs. + starttime (int, optional): Select jobs eligible after this + timestamp + endtime (int, optional): Select jobs eligible before this + timestamp + userids (list): List of userids + flags (int): Flags + db_flags (int): DB Flags + clusters (list): List of clusters + + Returns: + Dictionary whose key is the JOBS ID """ cdef: int i = 0 @@ -5654,10 +5611,7 @@ cdef class slurmdb_jobs: # slurmdbd Reservations Class # cdef class slurmdb_reservations: - """ - Class to access Slurmdbd reservations information. - """ - + """Access Slurmdbd reservations information.""" cdef: void *dbconn slurm.slurmdb_reservation_cond_t *reservation_cond @@ -5669,11 +5623,14 @@ cdef class slurmdb_reservations: slurm.slurmdb_destroy_reservation_cond(self.reservation_cond) def set_reservation_condition(self, start_time, end_time): - """ - Limit the next get() call to reservations that start after and before a certain time. + """Limit the next get() call to reservations that start after and + before a certain time. - :param start_time: Select reservations that start after this timestamp - :param end_time: Select reservations that end before this timestamp + Args: + start_time (int): Select reservations that start after this + unix timestamp + end_time (int): Select reservations that end before this unix + timestamp """ if self.reservation_cond == NULL: self.reservation_cond = xmalloc(sizeof(slurm.slurmdb_reservation_cond_t)) @@ -5685,12 +5642,11 @@ cdef class slurmdb_reservations: else: raise MemoryError() - def get(self): - """ - Get slurm reservations information. + def get(self) -> dict: + """Get slurm reservations information. 
- :returns: Dictionary whose keys are the reservations ids - :rtype: dict + Returns: + Dictionary whose keys are the reservations ids """ cdef: slurm.List reservation_list @@ -5759,10 +5715,7 @@ cdef class slurmdb_reservations: # slurmdbd clusters Class # cdef class slurmdb_clusters: - """ - Class to access Slurmdbd Clusters information. - """ - + """Access Slurmdbd Clusters information.""" cdef: void *db_conn slurm.slurmdb_cluster_cond_t *cluster_cond @@ -5777,12 +5730,14 @@ cdef class slurmdb_clusters: slurm.slurmdb_connection_close(&self.db_conn) def set_cluster_condition(self, start_time, end_time): - """ - Limit the next get() call to clusters that existed after and before + """Limit the next get() call to clusters that existed after and before a certain time. - :param start_time: Select clusters that existed after this timestamp - :param end_time: Select clusters that existed before this timestamp + Args: + start_time (int): Select clusters that existed after this unix + timestamp + end_time (int): Select clusters that existed before this unix + timestamp """ if self.cluster_cond == NULL: self.cluster_cond = xmalloc(sizeof(slurm.slurmdb_cluster_cond_t)) @@ -5796,12 +5751,11 @@ cdef class slurmdb_clusters: else: raise MemoryError() - def get(self): - """ - Get slurm clusters information. + def get(self) -> dict: + """Get slurm clusters information. - :returns: Dictionary whose keys are the clusters ids - :rtype: dict + Returns: + Dictionary whose keys are the clusters ids """ cdef: slurm.List clusters_list @@ -5879,10 +5833,7 @@ cdef class slurmdb_clusters: # slurmdbd Events Class # cdef class slurmdb_events: - """ - Class to access Slurmdbd events information. 
- """ - + """Access Slurmdbd events information.""" cdef: void *dbconn slurm.slurmdb_event_cond_t *event_cond @@ -5894,11 +5845,12 @@ cdef class slurmdb_events: slurm.slurmdb_destroy_event_cond(self.event_cond) def set_event_condition(self, start_time, end_time): - """ - Limit the next get() call to conditions that existed after and before a certain time. + """Limit the next get() call to conditions that existed after and + before a certain time. - :param start_time: Select conditions that existed after this timestamp - :param end_time: Select conditions that existed before this timestamp + Args: + start_time (int): Select conditions that existed after this unix timestamp + end_time (int): Select conditions that existed before this unix timestamp """ if self.event_cond == NULL: self.event_cond = xmalloc(sizeof(slurm.slurmdb_event_cond_t)) @@ -5910,12 +5862,11 @@ cdef class slurmdb_events: else: raise MemoryError() - def get(self): - """ - Get slurm events information. + def get(self) -> dict: + """Get slurm events information. - :returns: Dictionary whose keys are the events ids - :rtype: dict + Returns: + Dictionary whose keys are the events ids """ cdef: slurm.List event_list @@ -5961,10 +5912,7 @@ cdef class slurmdb_events: # cdef class slurmdb_reports: - """ - Class to access Slurmdbd reports. - """ - + """Access Slurmdbd reports.""" cdef: void *db_conn slurm.slurmdb_assoc_cond_t *assoc_cond @@ -5975,9 +5923,16 @@ cdef class slurmdb_reports: def __dealloc__(self): slurm.slurmdb_destroy_assoc_cond(self.assoc_cond) - def report_cluster_account_by_user(self, starttime=None, endtime=None): - """ - sreport cluster AccountUtilizationByUser + def report_cluster_account_by_user(self, starttime=None, + endtime=None) -> dict: + """sreport cluster AccountUtilizationByUser + + Args: + starttime (Union[str, int]): Start time + endtime (Union[str, int]): Start time + + Returns: + sreport information. 
""" cdef: slurm.List slurmdb_report_cluster_list = NULL @@ -6067,14 +6022,11 @@ cdef class slurmdb_reports: # -def get_last_slurm_error(): - """ - Get and return the last error from a slurm API call. +def get_last_slurm_error() -> int: + """Get and return the last error from a slurm API call. - :returns: Slurm error number and the associated error string - :rtype: integer - :returns: Slurm error string - :rtype: string + Returns: + Slurm error number and the associated error string """ rc = slurm.slurm_get_errno() @@ -6084,12 +6036,13 @@ def get_last_slurm_error(): return (rc, slurm.stringOrNone(slurm.slurm_strerror(rc), '')) cdef inline dict __get_licenses(char *licenses): - """ - Returns a dict of licenses from the slurm license string. + """Returns a dict of licenses from the slurm license string. + + Args: + licenses (str): String containing license information - :param string licenses: String containing license information - :returns: Dictionary of licenses and associated value. - :rtype: dict + Returns: + dict: Dictionary of licenses and associated value. """ if (licenses is NULL): return {} @@ -6113,30 +6066,32 @@ cdef inline dict __get_licenses(char *licenses): def get_node_use(inx): - """ - Returns a string that represents the block node mode. + """Returns a string that represents the block node mode. + + Args: + ResType: Slurm block node usage - :param int ResType: Slurm block node usage - :returns: Block node usage string - :rtype: string + Returns: + use (str): Block node usage string """ return slurm.slurm_node_state_string(inx) -def get_trigger_res_type(uint16_t inx): - """ - Returns a string that represents the slurm trigger res type. 
- - :param int ResType: Slurm trigger res state - - TRIGGER_RES_TYPE_JOB 1 - - TRIGGER_RES_TYPE_NODE 2 - - TRIGGER_RES_TYPE_SLURMCTLD 3 - - TRIGGER_RES_TYPE_SLURMDBD 4 - - TRIGGER_RES_TYPE_DATABASE 5 - - TRIGGER_RES_TYPE_FRONT_END 6 - - TRIGGER_RES_TYPE_OTHER 7 - :returns: Trigger reservation state string - :rtype: string +def get_trigger_res_type(uint16_t inx) -> str: + """Returns a string that represents the slurm trigger res type. + + Args: + ResType (int): Slurm trigger res state + * TRIGGER_RES_TYPE_JOB 1 + * TRIGGER_RES_TYPE_NODE 2 + * TRIGGER_RES_TYPE_SLURMCTLD 3 + * TRIGGER_RES_TYPE_SLURMDBD 4 + * TRIGGER_RES_TYPE_DATABASE 5 + * TRIGGER_RES_TYPE_FRONT_END 6 + * TRIGGER_RES_TYPE_OTHER 7 + + Returns: + Trigger reservation state string """ return __get_trigger_res_type(inx) @@ -6161,33 +6116,34 @@ cdef inline object __get_trigger_res_type(uint16_t ResType): return "%s" % rtype -def get_trigger_type(uint32_t inx): - """ - Returns a string that represents the state of the slurm trigger. - - :param int TriggerType: Slurm trigger type - - TRIGGER_TYPE_UP 0x00000001 - - TRIGGER_TYPE_DOWN 0x00000002 - - TRIGGER_TYPE_FAIL 0x00000004 - - TRIGGER_TYPE_TIME 0x00000008 - - TRIGGER_TYPE_FINI 0x00000010 - - TRIGGER_TYPE_RECONFIG 0x00000020 - - TRIGGER_TYPE_IDLE 0x00000080 - - TRIGGER_TYPE_DRAINED 0x00000100 - - TRIGGER_TYPE_PRI_CTLD_FAIL 0x00000200 - - TRIGGER_TYPE_PRI_CTLD_RES_OP 0x00000400 - - TRIGGER_TYPE_PRI_CTLD_RES_CTRL 0x00000800 - - TRIGGER_TYPE_PRI_CTLD_ACCT_FULL 0x00001000 - - TRIGGER_TYPE_BU_CTLD_FAIL 0x00002000 - - TRIGGER_TYPE_BU_CTLD_RES_OP 0x00004000 - - TRIGGER_TYPE_BU_CTLD_AS_CTRL 0x00008000 - - TRIGGER_TYPE_PRI_DBD_FAIL 0x00010000 - - TRIGGER_TYPE_PRI_DBD_RES_OP 0x00020000 - - TRIGGER_TYPE_PRI_DB_FAIL 0x00040000 - - TRIGGER_TYPE_PRI_DB_RES_OP 0x00080000 - - TRIGGER_TYPE_BURST_BUFFER 0x00100000 - :returns: Trigger state string - :rtype: string +def get_trigger_type(uint32_t inx) -> str: + """Returns a string that represents the state of the slurm trigger. 
+ + Args: + TriggerType (int): Slurm trigger type + * TRIGGER_TYPE_UP 0x00000001 + * TRIGGER_TYPE_DOWN 0x00000002 + * TRIGGER_TYPE_FAIL 0x00000004 + * TRIGGER_TYPE_TIME 0x00000008 + * TRIGGER_TYPE_FINI 0x00000010 + * TRIGGER_TYPE_RECONFIG 0x00000020 + * TRIGGER_TYPE_IDLE 0x00000080 + * TRIGGER_TYPE_DRAINED 0x00000100 + * TRIGGER_TYPE_PRI_CTLD_FAIL 0x00000200 + * TRIGGER_TYPE_PRI_CTLD_RES_OP 0x00000400 + * TRIGGER_TYPE_PRI_CTLD_RES_CTRL 0x00000800 + * TRIGGER_TYPE_PRI_CTLD_ACCT_FULL 0x00001000 + * TRIGGER_TYPE_BU_CTLD_FAIL 0x00002000 + * TRIGGER_TYPE_BU_CTLD_RES_OP 0x00004000 + * TRIGGER_TYPE_BU_CTLD_AS_CTRL 0x00008000 + * TRIGGER_TYPE_PRI_DBD_FAIL 0x00010000 + * TRIGGER_TYPE_PRI_DBD_RES_OP 0x00020000 + * TRIGGER_TYPE_PRI_DB_FAIL 0x00040000 + * TRIGGER_TYPE_PRI_DB_RES_OP 0x00080000 + * TRIGGER_TYPE_BURST_BUFFER 0x00100000 + + Returns: + Trigger state string """ return __get_trigger_type(inx) @@ -6270,12 +6226,14 @@ cdef inline object __get_trigger_type(uint32_t TriggerType): # pass -def get_debug_flags(uint64_t inx): - """ Returns a string that represents the slurm debug flags. +def get_debug_flags(uint64_t inx) -> str: + """Returns a string that represents the slurm debug flags. + + Args: + flags (int): Slurm debug flags - :param int flags: Slurm debug flags - :returns: Debug flag string - :rtype: string + Returns: + Debug flag string """ return debug_flags2str(inx) @@ -6421,23 +6379,25 @@ cdef inline list debug_flags2str(uint64_t debug_flags): def get_node_state(uint32_t inx): - """ - Returns a string that represents the state of the slurm node. + """Returns a string that represents the state of the slurm node. - :param int inx: Slurm node state - :returns: Node state string - :rtype: string + Args: + inx (int): Slurm node state + + Returns: + state (str): Node state string """ return slurm.slurm_node_state_string(inx) -def get_rm_partition_state(int inx): - """ - Returns a string that represents the partition state. 
+def get_rm_partition_state(int inx) -> str: + """Returns a string that represents the partition state. - :param int inx: Slurm partition state - :returns: Partition state string - :rtype: string + Args: + inx (int): Slurm partition state + + Returns: + Partition state string """ return __get_rm_partition_state(inx) @@ -6463,32 +6423,34 @@ cdef inline object __get_rm_partition_state(int inx): def get_preempt_mode(uint16_t inx): - """ - Returns a string that represents the preempt mode. - - :param int inx: Slurm preempt mode - - PREEMPT_MODE_OFF 0x0000 - - PREEMPT_MODE_SUSPEND 0x0001 - - PREEMPT_MODE_REQUEUE 0x0002 - - PREEMPT_MODE_CANCEL 0x0008 - - PREEMPT_MODE_GANG 0x8000 - :returns: Preempt mode string - :rtype: string + """Returns a string that represents the preempt mode. + + Args: + inx (int): Slurm preempt mode + * PREEMPT_MODE_OFF 0x0000 + * PREEMPT_MODE_SUSPEND 0x0001 + * PREEMPT_MODE_REQUEUE 0x0002 + * PREEMPT_MODE_CANCEL 0x0008 + * PREEMPT_MODE_GANG 0x8000 + + Returns: + mode (str): Preempt mode string """ return slurm.slurm_preempt_mode_string(inx) -def get_partition_state(uint16_t inx): - """ - Returns a string that represents the state of the slurm partition. - - :param int inx: Slurm partition state - - PARTITION_DOWN 0x01 - - PARTITION_UP 0x01 | 0x02 - - PARTITION_DRAIN 0x02 - - PARTITION_INACTIVE 0x00 - :returns: Partition state string - :rtype: string +def get_partition_state(uint16_t inx) -> str: + """Returns a string that represents the state of the slurm partition. + + Args: + inx (int): Slurm partition state + * PARTITION_DOWN 0x01 + * PARTITION_UP 0x01 | 0x02 + * PARTITION_DRAIN 0x02 + * PARTITION_INACTIVE 0x00 + + Returns: + Partition state string """ state = "" if inx: @@ -6506,13 +6468,14 @@ def get_partition_state(uint16_t inx): return state cdef inline object __get_partition_state(int inx, int extended=0): - """ - Returns a string that represents the state of the partition. + """Returns a string that represents the state of the partition. 
- :param int inx: Slurm partition type - :param int extended: - :returns: Partition state - :rtype: string + Args: + inx (int): Slurm partition type + extended (int): extended flag + + Returns: + str: Partition state """ cdef: int drain_flag = (inx & 0x0200) @@ -6563,13 +6526,15 @@ cdef inline object __get_partition_state(int inx, int extended=0): return "%s" % state -def get_partition_mode(uint16_t flags=0, uint16_t max_share=0): - """ - Returns a string represents the state of the partition mode. +def get_partition_mode(uint16_t flags=0, uint16_t max_share=0) -> str: + """Returns a string represents the state of the partition mode. + + Args: + flags (int): Flags + max_share (int): Max share - :param int inx: Slurm partition mode - :returns: Partition mode string - :rtype: string + Returns: + Partition mode string """ return __get_partition_mode(flags, max_share) @@ -6621,26 +6586,27 @@ cdef inline dict __get_partition_mode(uint16_t flags=0, uint16_t max_share=0): return mode -def get_job_state(inx): - """ - Return the state of the slurm job state. - - :param int inx: Slurm job state - - JOB_PENDING 0 - - JOB_RUNNING 1 - - JOB_SUSPENDED 2 - - JOB_COMPLETE 3 - - JOB_CANCELLED 4 - - JOB_FAILED 5 - - JOB_TIMEOUT 6 - - JOB_NODE_FAIL 7 - - JOB_PREEMPTED 8 - - JOB_BOOT_FAIL 10 - - JOB_DEADLINE 11 - - JOB_OOM 12 - - JOB_END - :returns: Job state string - :rtype: string +def get_job_state(inx) -> str: + """Return the state of the slurm job state. 
+ + Args: + inx (int): Slurm job state + * JOB_PENDING 0 + * JOB_RUNNING 1 + * JOB_SUSPENDED 2 + * JOB_COMPLETE 3 + * JOB_CANCELLED 4 + * JOB_FAILED 5 + * JOB_TIMEOUT 6 + * JOB_NODE_FAIL 7 + * JOB_PREEMPTED 8 + * JOB_BOOT_FAIL 10 + * JOB_DEADLINE 11 + * JOB_OOM 12 + * JOB_END + + Returns: + Job state string """ try: job_state = slurm.stringOrNone(slurm.slurm_job_state_string(inx), '') @@ -6649,25 +6615,27 @@ def get_job_state(inx): pass -def get_job_state_reason(inx): - """ - Returns a reason why the slurm job is in a provided state. +def get_job_state_reason(inx) -> str: + """Returns a reason why the slurm job is in a provided state. + + Args: + inx (int): Slurm job state reason - :param int inx: Slurm job state reason - :returns: Reason string - :rtype: string + Returns: + Reason string """ job_reason = slurm.stringOrNone(slurm.slurm_job_reason_string(inx), '') return job_reason -def epoch2date(epochSecs): - """ - Convert epoch secs to a python time string. +def epoch2date(epochSecs) -> str: + """Convert epoch secs to a python time string. + + Args: + epochSecs (int): Seconds since epoch - :param int epochSecs: Seconds since epoch - :returns: Date - :rtype: string + Returns: + Date str """ try: dateTime = p_time.gmtime(epochSecs) @@ -6703,9 +6671,7 @@ class Dict(defaultdict): cdef class licenses: - """ - Class to access slurm controller license information. - """ + """Access slurm controller license information.""" cdef: slurm.license_info_msg_t *_msg @@ -6719,30 +6685,26 @@ cdef class licenses: self._lastUpdate = NULL def __dealloc__(self): - """ - Free the memory allocated by load licenses method. - """ + """Free the memory allocated by load licenses method.""" pass - def lastUpdate(self): - """ - Return last time (epoch seconds) license data was updated. + def lastUpdate(self) -> int: + """Return last time (epoch seconds) license data was updated. 
- :returns: epoch seconds - :rtype: integer + Returns: + Epoch seconds """ return self._lastUpdate - def ids(self): - """ - Return the current license names from retrieved license data. - + def ids(self) -> dict: + """Return the current license names from retrieved license data. + This method calls slurm_load_licenses to retrieve license information from the controller. slurm_free_license_info_msg is used to free the license message buffer. - :returns: Dictionary of licenses - :rtype: dict + Returns: + Dictionary of licenses """ cdef: int rc @@ -6766,16 +6728,15 @@ cdef class licenses: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - cpdef get(self): - """ - Get full license information from the slurm controller. + def get(self) -> dict: + """Get full license information from the slurm controller. This method calls slurm_load_licenses to retrieve license information from the controller. slurm_free_license_info_msg is used to free the license message buffer. 
- :returns: Dictionary whose key is the license name - :rtype: dict + Returns: + Dictionary whose key is the license name """ cdef: int rc From f506d63634a9b20bfe475534589300beff4a8843 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Fri, 7 Apr 2023 13:19:38 +0200 Subject: [PATCH 13/48] Support for Slurm 23.02.X (#277) --- README.md | 12 +- pyslurm/__version__.py | 2 +- pyslurm/pydefines/slurm_defines.pxi | 2 - pyslurm/pydefines/slurm_enums.pxi | 6 - pyslurm/pydefines/slurm_errno_enums.pxi | 27 -- pyslurm/pydefines/slurmdb_defines.pxi | 1 - pyslurm/pyslurm.pyx | 6 - pyslurm/slurm/header.pxi | 322 ++++++++++++++++-------- setup.cfg | 2 +- setup.py | 4 +- 10 files changed, 229 insertions(+), 155 deletions(-) diff --git a/README.md b/README.md index 413112a0..9e92dc43 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ PySlurm is the Python client library for the [Slurm](https://slurm.schedmd.com) * [Python](https://www.python.org) - >= 3.6 * [Cython](https://cython.org) - >= 0.29.30 but < 3.0 -This PySlurm branch is for the Slurm Major-Release 22.05 +This PySlurm branch is for the Slurm Major-Release 23.02 ## Installation @@ -24,8 +24,8 @@ the corresponding paths to the necessary files. You can specify these Paths with environment variables (recommended), for example: ```shell -export SLURM_INCLUDE_DIR=/opt/slurm/22.05/include -export SLURM_LIB_DIR=/opt/slurm/22.05/lib +export SLURM_INCLUDE_DIR=/opt/slurm/23.02/include +export SLURM_LIB_DIR=/opt/slurm/23.02/lib ``` Then you can proceed to install PySlurm, for example by cloning the Repository: @@ -44,9 +44,9 @@ Also see `python setup.py --help` PySlurm's versioning scheme follows the official Slurm versioning. The first two numbers (MAJOR.MINOR) always correspond to Slurms Major-Release, for example -`22.05`. The last number (MICRO) is however not tied in any way to Slurms -MICRO version. For example, any PySlurm 22.05.X version should work with any -Slurm 22.05.X release. 
+`23.02`. The last number (MICRO) is however not tied in any way to Slurms +MICRO version. For example, any PySlurm 23.02.X version should work with any +Slurm 23.02.X release. ## Documentation diff --git a/pyslurm/__version__.py b/pyslurm/__version__.py index 416525d0..37000654 100644 --- a/pyslurm/__version__.py +++ b/pyslurm/__version__.py @@ -1 +1 @@ -__version__ = "22.5.1" +__version__ = "23.2.0" diff --git a/pyslurm/pydefines/slurm_defines.pxi b/pyslurm/pydefines/slurm_defines.pxi index a9ac41b6..f700a839 100644 --- a/pyslurm/pydefines/slurm_defines.pxi +++ b/pyslurm/pydefines/slurm_defines.pxi @@ -157,7 +157,6 @@ PRIVATE_DATA_USAGE = slurm.PRIVATE_DATA_USAGE PRIVATE_DATA_USERS = slurm.PRIVATE_DATA_USERS PRIVATE_DATA_ACCOUNTS = slurm.PRIVATE_DATA_ACCOUNTS PRIVATE_DATA_RESERVATIONS = slurm.PRIVATE_DATA_RESERVATIONS -PRIVATE_CLOUD_NODES = slurm.PRIVATE_CLOUD_NODES PRIVATE_DATA_EVENTS = slurm.PRIVATE_DATA_EVENTS PRIORITY_RESET_NONE = slurm.PRIORITY_RESET_NONE @@ -329,7 +328,6 @@ DEBUG_FLAG_POWER = slurm.DEBUG_FLAG_POWER DEBUG_FLAG_TIME_CRAY = slurm.DEBUG_FLAG_TIME_CRAY DEBUG_FLAG_DB_ARCHIVE = slurm.DEBUG_FLAG_DB_ARCHIVE DEBUG_FLAG_DB_TRES = slurm.DEBUG_FLAG_DB_TRES -DEBUG_FLAG_ESEARCH = slurm.DEBUG_FLAG_ESEARCH DEBUG_FLAG_NODE_FEATURES = slurm.DEBUG_FLAG_NODE_FEATURES DEBUG_FLAG_FEDR = slurm.DEBUG_FLAG_FEDR DEBUG_FLAG_HETJOB = slurm.DEBUG_FLAG_HETJOB diff --git a/pyslurm/pydefines/slurm_enums.pxi b/pyslurm/pydefines/slurm_enums.pxi index 73a93c4a..38aab46c 100644 --- a/pyslurm/pydefines/slurm_enums.pxi +++ b/pyslurm/pydefines/slurm_enums.pxi @@ -52,8 +52,6 @@ WAIT_QOS_THRES = slurm.WAIT_QOS_THRES WAIT_QOS_JOB_LIMIT = slurm.WAIT_QOS_JOB_LIMIT WAIT_QOS_RESOURCE_LIMIT = slurm.WAIT_QOS_RESOURCE_LIMIT WAIT_QOS_TIME_LIMIT = slurm.WAIT_QOS_TIME_LIMIT -WAIT_BLOCK_MAX_ERR = slurm.WAIT_BLOCK_MAX_ERR -WAIT_BLOCK_D_ACTION = slurm.WAIT_BLOCK_D_ACTION WAIT_CLEANING = slurm.WAIT_CLEANING WAIT_PROLOG = slurm.WAIT_PROLOG WAIT_QOS = slurm.WAIT_QOS @@ -260,11 +258,7 @@ 
SWITCH_PLUGIN_CRAY = slurm.SWITCH_PLUGIN_CRAY # enum select_jobdata_type -SELECT_JOBDATA_PAGG_ID = slurm.SELECT_JOBDATA_PAGG_ID -SELECT_JOBDATA_PTR = slurm.SELECT_JOBDATA_PTR -SELECT_JOBDATA_CLEANING = slurm.SELECT_JOBDATA_CLEANING SELECT_JOBDATA_NETWORK = slurm.SELECT_JOBDATA_NETWORK -SELECT_JOBDATA_RELEASED = slurm.SELECT_JOBDATA_RELEASED # end enum select_jobdata_type diff --git a/pyslurm/pydefines/slurm_errno_enums.pxi b/pyslurm/pydefines/slurm_errno_enums.pxi index 9fa6eea6..4cfdabe5 100644 --- a/pyslurm/pydefines/slurm_errno_enums.pxi +++ b/pyslurm/pydefines/slurm_errno_enums.pxi @@ -56,8 +56,6 @@ ESLURM_NOT_SUPPORTED = slurm.ESLURM_NOT_SUPPORTED ESLURM_DISABLED = slurm.ESLURM_DISABLED ESLURM_DEPENDENCY = slurm.ESLURM_DEPENDENCY ESLURM_BATCH_ONLY = slurm.ESLURM_BATCH_ONLY -ESLURM_TASKDIST_ARBITRARY_UNSUPPORTED = slurm.ESLURM_TASKDIST_ARBITRARY_UNSUPPORTED -ESLURM_TASKDIST_REQUIRES_OVERCOMMIT = slurm.ESLURM_TASKDIST_REQUIRES_OVERCOMMIT ESLURM_JOB_HELD = slurm.ESLURM_JOB_HELD ESLURM_INVALID_CRED_TYPE_CHANGE = slurm.ESLURM_INVALID_CRED_TYPE_CHANGE ESLURM_INVALID_TASK_MEMORY = slurm.ESLURM_INVALID_TASK_MEMORY @@ -79,9 +77,6 @@ ESLURM_PORTS_BUSY = slurm.ESLURM_PORTS_BUSY ESLURM_PORTS_INVALID = slurm.ESLURM_PORTS_INVALID ESLURM_PROLOG_RUNNING = slurm.ESLURM_PROLOG_RUNNING ESLURM_NO_STEPS = slurm.ESLURM_NO_STEPS -ESLURM_INVALID_BLOCK_STATE = slurm.ESLURM_INVALID_BLOCK_STATE -ESLURM_INVALID_BLOCK_LAYOUT = slurm.ESLURM_INVALID_BLOCK_LAYOUT -ESLURM_INVALID_BLOCK_NAME = slurm.ESLURM_INVALID_BLOCK_NAME ESLURM_INVALID_QOS = slurm.ESLURM_INVALID_QOS ESLURM_QOS_PREEMPTION_LOOP = slurm.ESLURM_QOS_PREEMPTION_LOOP ESLURM_NODE_NOT_AVAIL = slurm.ESLURM_NODE_NOT_AVAIL @@ -141,53 +136,31 @@ ESLURM_INVALID_TIME_MIN_LIMIT = slurm.ESLURM_INVALID_TIME_MIN_LIMIT ESLURM_DEFER = slurm.ESLURM_DEFER ESLURM_CONFIGLESS_DISABLED = slurm.ESLURM_CONFIGLESS_DISABLED ESLURM_ENVIRONMENT_MISSING = slurm.ESLURM_ENVIRONMENT_MISSING -ESLURMD_PIPE_ERROR_ON_TASK_SPAWN = 
slurm.ESLURMD_PIPE_ERROR_ON_TASK_SPAWN ESLURMD_KILL_TASK_FAILED = slurm.ESLURMD_KILL_TASK_FAILED ESLURMD_KILL_JOB_ALREADY_COMPLETE = slurm.ESLURMD_KILL_JOB_ALREADY_COMPLETE ESLURMD_INVALID_ACCT_FREQ = slurm.ESLURMD_INVALID_ACCT_FREQ ESLURMD_INVALID_JOB_CREDENTIAL = slurm.ESLURMD_INVALID_JOB_CREDENTIAL -ESLURMD_UID_NOT_FOUND = slurm.ESLURMD_UID_NOT_FOUND -ESLURMD_GID_NOT_FOUND = slurm.ESLURMD_GID_NOT_FOUND ESLURMD_CREDENTIAL_EXPIRED = slurm.ESLURMD_CREDENTIAL_EXPIRED ESLURMD_CREDENTIAL_REVOKED = slurm.ESLURMD_CREDENTIAL_REVOKED ESLURMD_CREDENTIAL_REPLAYED = slurm.ESLURMD_CREDENTIAL_REPLAYED ESLURMD_CREATE_BATCH_DIR_ERROR = slurm.ESLURMD_CREATE_BATCH_DIR_ERROR -ESLURMD_MODIFY_BATCH_DIR_ERROR = slurm.ESLURMD_MODIFY_BATCH_DIR_ERROR -ESLURMD_CREATE_BATCH_SCRIPT_ERROR = slurm.ESLURMD_CREATE_BATCH_SCRIPT_ERROR -ESLURMD_MODIFY_BATCH_SCRIPT_ERROR = slurm.ESLURMD_MODIFY_BATCH_SCRIPT_ERROR ESLURMD_SETUP_ENVIRONMENT_ERROR = slurm.ESLURMD_SETUP_ENVIRONMENT_ERROR -ESLURMD_SHARED_MEMORY_ERROR = slurm.ESLURMD_SHARED_MEMORY_ERROR ESLURMD_SET_UID_OR_GID_ERROR = slurm.ESLURMD_SET_UID_OR_GID_ERROR -ESLURMD_SET_SID_ERROR = slurm.ESLURMD_SET_SID_ERROR -ESLURMD_CANNOT_SPAWN_IO_THREAD = slurm.ESLURMD_CANNOT_SPAWN_IO_THREAD -ESLURMD_FORK_FAILED = slurm.ESLURMD_FORK_FAILED ESLURMD_EXECVE_FAILED = slurm.ESLURMD_EXECVE_FAILED ESLURMD_IO_ERROR = slurm.ESLURMD_IO_ERROR ESLURMD_PROLOG_FAILED = slurm.ESLURMD_PROLOG_FAILED ESLURMD_EPILOG_FAILED = slurm.ESLURMD_EPILOG_FAILED -ESLURMD_SESSION_KILLED = slurm.ESLURMD_SESSION_KILLED ESLURMD_TOOMANYSTEPS = slurm.ESLURMD_TOOMANYSTEPS ESLURMD_STEP_EXISTS = slurm.ESLURMD_STEP_EXISTS ESLURMD_JOB_NOTRUNNING = slurm.ESLURMD_JOB_NOTRUNNING ESLURMD_STEP_SUSPENDED = slurm.ESLURMD_STEP_SUSPENDED ESLURMD_STEP_NOTSUSPENDED = slurm.ESLURMD_STEP_NOTSUSPENDED ESLURMD_INVALID_SOCKET_NAME_LEN = slurm.ESLURMD_INVALID_SOCKET_NAME_LEN -ESCRIPT_CHDIR_FAILED = slurm.ESCRIPT_CHDIR_FAILED -ESCRIPT_OPEN_OUTPUT_FAILED = slurm.ESCRIPT_OPEN_OUTPUT_FAILED -ESCRIPT_NON_ZERO_RETURN = 
slurm.ESCRIPT_NON_ZERO_RETURN -SLURM_PROTOCOL_SOCKET_IMPL_ZERO_RECV_LENGTH = slurm.SLURM_PROTOCOL_SOCKET_IMPL_ZERO_RECV_LENGTH -SLURM_PROTOCOL_SOCKET_IMPL_NEGATIVE_RECV_LENGTH = slurm.SLURM_PROTOCOL_SOCKET_IMPL_NEGATIVE_RECV_LENGTH -SLURM_PROTOCOL_SOCKET_IMPL_NOT_ALL_DATA_SENT = slurm.SLURM_PROTOCOL_SOCKET_IMPL_NOT_ALL_DATA_SENT ESLURM_PROTOCOL_INCOMPLETE_PACKET = slurm.ESLURM_PROTOCOL_INCOMPLETE_PACKET SLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT = slurm.SLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT SLURM_PROTOCOL_SOCKET_ZERO_BYTES_SENT = slurm.SLURM_PROTOCOL_SOCKET_ZERO_BYTES_SENT ESLURM_AUTH_CRED_INVALID = slurm.ESLURM_AUTH_CRED_INVALID -ESLURM_AUTH_FOPEN_ERROR = slurm.ESLURM_AUTH_FOPEN_ERROR -ESLURM_AUTH_NET_ERROR = slurm.ESLURM_AUTH_NET_ERROR -ESLURM_AUTH_UNABLE_TO_SIGN = slurm.ESLURM_AUTH_UNABLE_TO_SIGN ESLURM_AUTH_BADARG = slurm.ESLURM_AUTH_BADARG -ESLURM_AUTH_MEMORY = slurm.ESLURM_AUTH_MEMORY -ESLURM_AUTH_INVALID = slurm.ESLURM_AUTH_INVALID ESLURM_AUTH_UNPACK = slurm.ESLURM_AUTH_UNPACK ESLURM_DB_CONNECTION = slurm.ESLURM_DB_CONNECTION ESLURM_JOBS_RUNNING_ON_ASSOC = slurm.ESLURM_JOBS_RUNNING_ON_ASSOC diff --git a/pyslurm/pydefines/slurmdb_defines.pxi b/pyslurm/pydefines/slurmdb_defines.pxi index a09be533..8cea05f0 100644 --- a/pyslurm/pydefines/slurmdb_defines.pxi +++ b/pyslurm/pydefines/slurmdb_defines.pxi @@ -60,7 +60,6 @@ SLURMDB_FS_USE_PARENT = slurm.SLURMDB_FS_USE_PARENT SLURMDB_CLASSIFIED_FLAG = slurm.SLURMDB_CLASSIFIED_FLAG SLURMDB_CLASS_BASE = slurm.SLURMDB_CLASS_BASE -CLUSTER_FLAG_A1 = slurm.CLUSTER_FLAG_A1 CLUSTER_FLAG_A2 = slurm.CLUSTER_FLAG_A2 CLUSTER_FLAG_A3 = slurm.CLUSTER_FLAG_A3 CLUSTER_FLAG_A4 = slurm.CLUSTER_FLAG_A4 diff --git a/pyslurm/pyslurm.pyx b/pyslurm/pyslurm.pyx index a179c4fb..bfcae7a7 100644 --- a/pyslurm/pyslurm.pyx +++ b/pyslurm/pyslurm.pyx @@ -641,8 +641,6 @@ cdef class config: Ctl_dict['keep_alive_time'] = slurm.int16orNone(self.__Config_ptr.keepalive_time) Ctl_dict['kill_on_bad_exit'] = bool(self.__Config_ptr.kill_on_bad_exit) 
Ctl_dict['kill_wait'] = self.__Config_ptr.kill_wait - Ctl_dict['launch_params'] = slurm.stringOrNone(self.__Config_ptr.launch_type, '') - Ctl_dict['launch_type'] = slurm.stringOrNone(self.__Config_ptr.launch_type, '') Ctl_dict['licenses'] = __get_licenses(self.__Config_ptr.licenses) Ctl_dict['log_fmt'] = self.__Config_ptr.log_fmt Ctl_dict['mail_domain'] = slurm.stringOrNone(self.__Config_ptr.mail_domain, '') @@ -738,7 +736,6 @@ cdef class config: # TODO: slurmctld_host Ctl_dict['slurmctld_logfile'] = slurm.stringOrNone(self.__Config_ptr.slurmctld_logfile, '') Ctl_dict['slurmctld_pidfile'] = slurm.stringOrNone(self.__Config_ptr.slurmctld_pidfile, '') - Ctl_dict['slurmctld_plugstack'] = slurm.stringOrNone(self.__Config_ptr.slurmctld_plugstack, '') Ctl_dict['slurmctld_port'] = self.__Config_ptr.slurmctld_port Ctl_dict['slurmctld_port_count'] = self.__Config_ptr.slurmctld_port_count Ctl_dict['slurmctld_primary_off_prog'] = slurm.stringOrNone(self.__Config_ptr.slurmctld_primary_off_prog, '') @@ -6297,9 +6294,6 @@ cdef inline list debug_flags2str(uint64_t debug_flags): if (debug_flags & DEBUG_FLAG_DB_WCKEY): debugFlags.append('DB_WCKey') - if (debug_flags & DEBUG_FLAG_ESEARCH): - debugFlags.append('Elasticsearch') - if (debug_flags & DEBUG_FLAG_ENERGY): debugFlags.append('Energy') diff --git a/pyslurm/slurm/header.pxi b/pyslurm/slurm/header.pxi index 7de32bf2..2457fcbc 100644 --- a/pyslurm/slurm/header.pxi +++ b/pyslurm/slurm/header.pxi @@ -25,6 +25,7 @@ cdef extern from "slurm/slurm_errno.h": SLURMCTLD_COMMUNICATIONS_SEND_ERROR SLURMCTLD_COMMUNICATIONS_RECEIVE_ERROR SLURMCTLD_COMMUNICATIONS_SHUTDOWN_ERROR + SLURMCTLD_COMMUNICATIONS_BACKOFF SLURM_NO_CHANGE_IN_DATA ESLURM_INVALID_PARTITION_NAME ESLURM_DEFAULT_PARTITION_NOT_SET @@ -66,8 +67,7 @@ cdef extern from "slurm/slurm_errno.h": ESLURM_DISABLED ESLURM_DEPENDENCY ESLURM_BATCH_ONLY - ESLURM_TASKDIST_ARBITRARY_UNSUPPORTED - ESLURM_TASKDIST_REQUIRES_OVERCOMMIT + ESLURM_LICENSES_UNAVAILABLE ESLURM_JOB_HELD 
ESLURM_INVALID_CRED_TYPE_CHANGE ESLURM_INVALID_TASK_MEMORY @@ -89,9 +89,6 @@ cdef extern from "slurm/slurm_errno.h": ESLURM_PORTS_INVALID ESLURM_PROLOG_RUNNING ESLURM_NO_STEPS - ESLURM_INVALID_BLOCK_STATE - ESLURM_INVALID_BLOCK_LAYOUT - ESLURM_INVALID_BLOCK_NAME ESLURM_INVALID_QOS ESLURM_QOS_PREEMPTION_LOOP ESLURM_NODE_NOT_AVAIL @@ -123,6 +120,8 @@ cdef extern from "slurm/slurm_errno.h": ESLURM_INVALID_BURST_BUFFER_REQUEST ESLURM_PRIO_RESET_FAIL ESLURM_CANNOT_MODIFY_CRON_JOB + ESLURM_INVALID_JOB_CONTAINER_CHANGE + ESLURM_CANNOT_CANCEL_CRON_JOB ESLURM_INVALID_MCS_LABEL ESLURM_BURST_BUFFER_WAIT ESLURM_PARTITION_DOWN @@ -159,6 +158,33 @@ cdef extern from "slurm/slurm_errno.h": ESLURM_BAD_THREAD_PER_CORE ESLURM_INVALID_PREFER ESLURM_INSUFFICIENT_GRES + ESLURM_INVALID_CONTAINER_ID + ESLURM_EMPTY_JOB_ID + ESLURM_INVALID_JOB_ID_ZERO + ESLURM_INVALID_JOB_ID_NEGATIVE + ESLURM_INVALID_JOB_ID_TOO_LARGE + ESLURM_INVALID_JOB_ID_NON_NUMERIC + ESLURM_EMPTY_JOB_ARRAY_ID + ESLURM_INVALID_JOB_ARRAY_ID_NEGATIVE + ESLURM_INVALID_JOB_ARRAY_ID_TOO_LARGE + ESLURM_INVALID_JOB_ARRAY_ID_NON_NUMERIC + ESLURM_INVALID_HET_JOB_AND_ARRAY + ESLURM_EMPTY_HET_JOB_COMP + ESLURM_INVALID_HET_JOB_COMP_NEGATIVE + ESLURM_INVALID_HET_JOB_COMP_TOO_LARGE + ESLURM_INVALID_HET_JOB_COMP_NON_NUMERIC + ESLURM_EMPTY_STEP_ID + ESLURM_INVALID_STEP_ID_NEGATIVE + ESLURM_INVALID_STEP_ID_TOO_LARGE + ESLURM_INVALID_STEP_ID_NON_NUMERIC + ESLURM_EMPTY_HET_STEP + ESLURM_INVALID_HET_STEP_ZERO + ESLURM_INVALID_HET_STEP_NEGATIVE + ESLURM_INVALID_HET_STEP_TOO_LARGE + ESLURM_INVALID_HET_STEP_NON_NUMERIC + ESLURM_INVALID_HET_STEP_JOB + ESLURM_JOB_TIMEOUT_KILLED + ESLURM_JOB_NODE_FAIL_KILLED ESPANK_ERROR ESPANK_BAD_ARG ESPANK_NOT_TASK @@ -170,31 +196,20 @@ cdef extern from "slurm/slurm_errno.h": ESPANK_NOT_EXECD ESPANK_NOT_AVAIL ESPANK_NOT_LOCAL - ESLURMD_PIPE_ERROR_ON_TASK_SPAWN ESLURMD_KILL_TASK_FAILED ESLURMD_KILL_JOB_ALREADY_COMPLETE ESLURMD_INVALID_ACCT_FREQ ESLURMD_INVALID_JOB_CREDENTIAL - ESLURMD_UID_NOT_FOUND - 
ESLURMD_GID_NOT_FOUND ESLURMD_CREDENTIAL_EXPIRED ESLURMD_CREDENTIAL_REVOKED ESLURMD_CREDENTIAL_REPLAYED ESLURMD_CREATE_BATCH_DIR_ERROR - ESLURMD_MODIFY_BATCH_DIR_ERROR - ESLURMD_CREATE_BATCH_SCRIPT_ERROR - ESLURMD_MODIFY_BATCH_SCRIPT_ERROR ESLURMD_SETUP_ENVIRONMENT_ERROR - ESLURMD_SHARED_MEMORY_ERROR ESLURMD_SET_UID_OR_GID_ERROR - ESLURMD_SET_SID_ERROR - ESLURMD_CANNOT_SPAWN_IO_THREAD - ESLURMD_FORK_FAILED ESLURMD_EXECVE_FAILED ESLURMD_IO_ERROR ESLURMD_PROLOG_FAILED ESLURMD_EPILOG_FAILED - ESLURMD_SESSION_KILLED ESLURMD_TOOMANYSTEPS ESLURMD_STEP_EXISTS ESLURMD_JOB_NOTRUNNING @@ -203,24 +218,15 @@ cdef extern from "slurm/slurm_errno.h": ESLURMD_INVALID_SOCKET_NAME_LEN ESLURMD_CONTAINER_RUNTIME_INVALID ESLURMD_CPU_BIND_ERROR - ESCRIPT_CHDIR_FAILED - ESCRIPT_OPEN_OUTPUT_FAILED - ESCRIPT_NON_ZERO_RETURN - SLURM_PROTOCOL_SOCKET_IMPL_ZERO_RECV_LENGTH - SLURM_PROTOCOL_SOCKET_IMPL_NEGATIVE_RECV_LENGTH - SLURM_PROTOCOL_SOCKET_IMPL_NOT_ALL_DATA_SENT + ESLURMD_CPU_LAYOUT_ERROR ESLURM_PROTOCOL_INCOMPLETE_PACKET SLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT SLURM_PROTOCOL_SOCKET_ZERO_BYTES_SENT ESLURM_AUTH_CRED_INVALID - ESLURM_AUTH_FOPEN_ERROR - ESLURM_AUTH_NET_ERROR - ESLURM_AUTH_UNABLE_TO_SIGN ESLURM_AUTH_BADARG - ESLURM_AUTH_MEMORY - ESLURM_AUTH_INVALID ESLURM_AUTH_UNPACK ESLURM_AUTH_SKIP + ESLURM_AUTH_UNABLE_TO_GENERATE_TOKEN ESLURM_DB_CONNECTION ESLURM_JOBS_RUNNING_ON_ASSOC ESLURM_CLUSTER_DELETED @@ -242,25 +248,44 @@ cdef extern from "slurm/slurm_errno.h": ESLURM_INVALID_KNL ESLURM_PLUGIN_INVALID ESLURM_PLUGIN_INCOMPLETE + ESLURM_PLUGIN_NOT_LOADED ESLURM_REST_INVALID_QUERY ESLURM_REST_FAIL_PARSING ESLURM_REST_INVALID_JOBS_DESC ESLURM_REST_EMPTY_RESULT + ESLURM_REST_MISSING_UID + ESLURM_REST_MISSING_GID ESLURM_DATA_PATH_NOT_FOUND ESLURM_DATA_PTR_NULL ESLURM_DATA_CONV_FAILED ESLURM_DATA_REGEX_COMPILE ESLURM_DATA_UNKNOWN_MIME_TYPE ESLURM_DATA_TOO_LARGE + ESLURM_DATA_FLAGS_INVALID_TYPE + ESLURM_DATA_FLAGS_INVALID + ESLURM_DATA_EXPECTED_LIST + ESLURM_DATA_EXPECTED_DICT + 
ESLURM_DATA_AMBIGUOUS_MODIFY + ESLURM_DATA_AMBIGUOUS_QUERY + ESLURM_DATA_PARSE_NOTHING ESLURM_CONTAINER_NOT_CONFIGURED + ctypedef struct slurm_errtab_t: + int xe_number + char* xe_name + char* xe_message + + slurm_errtab_t slurm_errtab[] + + unsigned int slurm_errtab_size + char* slurm_strerror(int errnum) void slurm_seterrno(int errnum) int slurm_get_errno() - void slurm_perror(char* msg) + void slurm_perror(const char* msg) cdef extern from "slurm/slurm.h": @@ -281,6 +306,7 @@ cdef extern from "slurm/slurm.h": uint64_t NO_CONSUME_VAL64 uint16_t MAX_TASKS_PER_NODE uint32_t MAX_JOB_ID + uint8_t MAX_HET_JOB_COMPONENTS uint8_t MAX_FED_CLUSTERS uint32_t SLURM_MAX_NORMAL_STEP_ID uint32_t SLURM_PENDING_STEP @@ -350,6 +376,9 @@ cdef extern from "slurm/slurm.h": uint16_t SLURM_DIST_NODESOCKMASK uint8_t OPEN_MODE_APPEND uint8_t OPEN_MODE_TRUNCATE + uint8_t CPU_BIND_T_TO_MASK + uint32_t CPU_BIND_T_AUTO_TO_MASK + uint16_t CPU_BIND_T_MASK uint32_t CPU_FREQ_RANGE_FLAG uint32_t CPU_FREQ_LOW uint32_t CPU_FREQ_MEDIUM @@ -362,6 +391,8 @@ cdef extern from "slurm/slurm.h": uint32_t CPU_FREQ_USERSPACE uint32_t CPU_FREQ_SCHEDUTIL uint32_t CPU_FREQ_GOV_MASK + uint8_t MEM_BIND_TYPE_MASK + uint8_t MEM_BIND_TYPE_FLAGS_MASK uint8_t NODE_STATE_BASE uint32_t NODE_STATE_FLAGS uint8_t NODE_STATE_NET @@ -415,7 +446,6 @@ cdef extern from "slurm/slurm.h": uint8_t PRIVATE_DATA_USERS uint8_t PRIVATE_DATA_ACCOUNTS uint8_t PRIVATE_DATA_RESERVATIONS - uint8_t PRIVATE_CLOUD_NODES uint16_t PRIVATE_DATA_EVENTS uint8_t PRIORITY_RESET_NONE uint8_t PRIORITY_RESET_NOW @@ -471,7 +501,10 @@ cdef extern from "slurm/slurm.h": uint32_t JOB_PART_ASSIGNED uint64_t BACKFILL_SCHED uint64_t BACKFILL_LAST + uint64_t TASKS_CHANGED uint64_t JOB_SEND_SCRIPT + uint64_t RESET_LIC_TASK + uint64_t RESET_LIC_JOB uint8_t X11_FORWARD_ALL uint8_t X11_FORWARD_BATCH uint8_t X11_FORWARD_FIRST @@ -493,6 +526,7 @@ cdef extern from "slurm/slurm.h": uint8_t PART_FLAG_REQ_RESV uint8_t PART_FLAG_LLN uint8_t PART_FLAG_EXCLUSIVE_USER + 
uint8_t PART_FLAG_PDOI uint16_t PART_FLAG_DEFAULT_CLR uint16_t PART_FLAG_HIDDEN_CLR uint16_t PART_FLAG_NO_ROOT_CLR @@ -500,6 +534,7 @@ cdef extern from "slurm/slurm.h": uint16_t PART_FLAG_REQ_RESV_CLR uint16_t PART_FLAG_LLN_CLR uint16_t PART_FLAG_EXC_USER_CLR + uint16_t PART_FLAG_PDOI_CLR uint8_t RESERVE_FLAG_MAINT uint8_t RESERVE_FLAG_NO_MAINT uint8_t RESERVE_FLAG_DAILY @@ -580,7 +615,7 @@ cdef extern from "slurm/slurm.h": uint64_t DEBUG_FLAG_TIME_CRAY uint64_t DEBUG_FLAG_DB_ARCHIVE uint64_t DEBUG_FLAG_DB_TRES - uint64_t DEBUG_FLAG_ESEARCH + uint64_t DEBUG_FLAG_JOBCOMP uint64_t DEBUG_FLAG_NODE_FEATURES uint64_t DEBUG_FLAG_FEDR uint64_t DEBUG_FLAG_HETJOB @@ -599,9 +634,11 @@ cdef extern from "slurm/slurm.h": uint16_t PREEMPT_MODE_GANG uint8_t RECONFIG_KEEP_PART_INFO uint8_t RECONFIG_KEEP_PART_STAT + uint8_t RECONFIG_KEEP_POWER_SAVE_SETTINGS uint8_t HEALTH_CHECK_NODE_IDLE uint8_t HEALTH_CHECK_NODE_ALLOC uint8_t HEALTH_CHECK_NODE_MIXED + uint8_t HEALTH_CHECK_NODE_NONDRAINED_IDLE uint16_t HEALTH_CHECK_CYCLE uint8_t HEALTH_CHECK_NODE_ANY uint8_t PROLOG_FLAG_ALLOC @@ -610,6 +647,7 @@ cdef extern from "slurm/slurm.h": uint8_t PROLOG_FLAG_SERIAL uint8_t PROLOG_FLAG_X11 uint8_t PROLOG_FLAG_DEFER_BATCH + uint8_t PROLOG_FLAG_FORCE_REQUEUE_ON_FAIL uint8_t CTL_CONF_OR uint8_t CTL_CONF_SJC uint8_t CTL_CONF_DRJ @@ -618,6 +656,7 @@ cdef extern from "slurm/slurm.h": uint8_t CTL_CONF_WCKEY uint8_t CTL_CONF_IPV4_ENABLED uint8_t CTL_CONF_IPV6_ENABLED + uint16_t CTL_CONF_SJX uint16_t CTL_CONF_SJS uint16_t CTL_CONF_SJE uint8_t LOG_FMT_ISO8601_MS @@ -627,6 +666,7 @@ cdef extern from "slurm/slurm.h": uint8_t LOG_FMT_CLOCK uint8_t LOG_FMT_SHORT uint8_t LOG_FMT_THREAD_ID + uint8_t LOG_FMT_RFC3339 uint8_t STAT_COMMAND_RESET uint8_t STAT_COMMAND_GET uint8_t TRIGGER_FLAG_PERM @@ -657,6 +697,8 @@ cdef extern from "slurm/slurm.h": uint32_t TRIGGER_TYPE_PRI_DB_FAIL uint32_t TRIGGER_TYPE_PRI_DB_RES_OP uint32_t TRIGGER_TYPE_BURST_BUFFER + uint32_t TRIGGER_TYPE_DRAINING + uint32_t 
TRIGGER_TYPE_RESUME uint8_t ASSOC_MGR_INFO_FLAG_ASSOC uint8_t ASSOC_MGR_INFO_FLAG_USERS uint8_t ASSOC_MGR_INFO_FLAG_QOS @@ -669,6 +711,7 @@ cdef extern from "slurm/slurm.h": uint8_t KILL_OOM uint8_t KILL_NO_SIBS uint16_t KILL_JOB_RESV + uint16_t KILL_NO_CRON uint16_t WARN_SENT uint8_t BB_FLAG_DISABLE_PERSISTENT uint8_t BB_FLAG_ENABLE_PERSISTENT @@ -769,8 +812,8 @@ cdef extern from "slurm/slurm.h": WAIT_QOS_JOB_LIMIT WAIT_QOS_RESOURCE_LIMIT WAIT_QOS_TIME_LIMIT - WAIT_BLOCK_MAX_ERR - WAIT_BLOCK_D_ACTION + FAIL_SIGNAL + DEFUNCT_WAIT_34 WAIT_CLEANING WAIT_PROLOG WAIT_QOS @@ -970,11 +1013,7 @@ cdef extern from "slurm/slurm.h": SWITCH_PLUGIN_SLINGSHOT cdef enum select_jobdata_type: - SELECT_JOBDATA_PAGG_ID - SELECT_JOBDATA_PTR - SELECT_JOBDATA_CLEANING SELECT_JOBDATA_NETWORK - SELECT_JOBDATA_RELEASED cdef enum select_nodedata_type: SELECT_NODEDATA_SUBCNT @@ -1034,6 +1073,11 @@ cdef extern from "slurm/slurm.h": ENERGY_DATA_NODE_ENERGY_UP ENERGY_DATA_STEP_PTR + ctypedef enum update_mode_t: + UPDATE_SET + UPDATE_ADD + UPDATE_REMOVE + cdef enum task_dist_states: SLURM_DIST_CYCLIC SLURM_DIST_BLOCK @@ -1136,23 +1180,27 @@ cdef extern from "slurm/slurm.h": SSF_MEM_ZERO SSF_OVERLAP_FORCE - void slurm_init(char* conf) + void slurm_init(const char* conf) void slurm_fini() + void slurm_client_init_plugins() + + void slurm_client_fini_plugins() + ctypedef hostlist* hostlist_t - hostlist_t slurm_hostlist_create(char* hostlist) + hostlist_t slurm_hostlist_create(const char* hostlist) int slurm_hostlist_count(hostlist_t hl) void slurm_hostlist_destroy(hostlist_t hl) - int slurm_hostlist_find(hostlist_t hl, char* hostname) + int slurm_hostlist_find(hostlist_t hl, const char* hostname) - int slurm_hostlist_push(hostlist_t hl, char* hosts) + int slurm_hostlist_push(hostlist_t hl, const char* hosts) - int slurm_hostlist_push_host(hostlist_t hl, char* host) + int slurm_hostlist_push_host(hostlist_t hl, const char* host) ssize_t slurm_hostlist_ranged_string(hostlist_t hl, size_t n, char* 
buf) @@ -1166,8 +1214,12 @@ cdef extern from "slurm/slurm.h": ctypedef xlist* List + ctypedef xlist list_t + ctypedef listIterator* ListIterator + ctypedef listIterator list_itr_t + ctypedef void (*ListDelF)(void* x) ctypedef int (*ListCmpF)(void* x, void* y) @@ -1176,29 +1228,29 @@ cdef extern from "slurm/slurm.h": ctypedef int (*ListForF)(void* x, void* arg) - void* slurm_list_append(List l, void* x) + void slurm_list_append(list_t* l, void* x) - int slurm_list_count(List l) + int slurm_list_count(list_t* l) - List slurm_list_create(ListDelF f) + list_t* slurm_list_create(ListDelF f) - void slurm_list_destroy(List l) + void slurm_list_destroy(list_t* l) - void* slurm_list_find(ListIterator i, ListFindF f, void* key) + void* slurm_list_find(list_itr_t* i, ListFindF f, void* key) - int slurm_list_is_empty(List l) + int slurm_list_is_empty(list_t* l) - ListIterator slurm_list_iterator_create(List l) + list_itr_t* slurm_list_iterator_create(list_t* l) - void slurm_list_iterator_reset(ListIterator i) + void slurm_list_iterator_reset(list_itr_t* i) - void slurm_list_iterator_destroy(ListIterator i) + void slurm_list_iterator_destroy(list_itr_t* i) - void* slurm_list_next(ListIterator i) + void* slurm_list_next(list_itr_t* i) - void slurm_list_sort(List l, ListCmpF f) + void slurm_list_sort(list_t* l, ListCmpF f) - void* slurm_list_pop(List l) + void* slurm_list_pop(list_t* l) ctypedef int64_t bitstr_t @@ -1261,6 +1313,7 @@ cdef extern from "slurm/slurm.h": char* comment uint16_t contiguous char* container + char* container_id uint16_t core_spec char* cpu_bind uint16_t cpu_bind_type @@ -1276,8 +1329,8 @@ cdef extern from "slurm/slurm.h": char** environment slurm_hash_t env_hash uint32_t env_size - char* extra char* exc_nodes + char* extra char* features uint64_t fed_siblings_active uint64_t fed_siblings_viable @@ -1286,8 +1339,10 @@ cdef extern from "slurm/slurm.h": uint16_t immediate uint32_t job_id char* job_id_str + char* job_size_str uint16_t kill_on_node_fail char* 
licenses + char* licenses_tot uint16_t mail_type char* mail_user char* mcs_label @@ -1358,7 +1413,6 @@ cdef extern from "slurm/slurm.h": uint32_t pn_min_tmp_disk char* req_context uint32_t req_switch - dynamic_plugin_data_t* select_jobinfo char* selinux_context char* std_err char* std_in @@ -1397,6 +1451,7 @@ cdef extern from "slurm/slurm.h": char* command char* comment char* container + char* container_id uint16_t contiguous uint16_t core_spec uint16_t cores_per_socket @@ -1416,6 +1471,8 @@ cdef extern from "slurm/slurm.h": char* exc_nodes int32_t* exc_node_inx uint32_t exit_code + char* extra + char* failed_node char* features char* fed_origin_str uint64_t fed_siblings_active @@ -1431,6 +1488,7 @@ cdef extern from "slurm/slurm.h": uint32_t het_job_offset uint32_t job_id job_resources_t* job_resrcs + char* job_size_str uint32_t job_state time_t last_sched_eval char* licenses @@ -1474,7 +1532,6 @@ cdef extern from "slurm/slurm.h": uint16_t restart_cnt char* resv_name char* sched_nodes - dynamic_plugin_data_t* select_jobinfo char* selinux_context uint16_t shared uint16_t show_flags @@ -1484,7 +1541,7 @@ cdef extern from "slurm/slurm.h": time_t start_time uint16_t start_protocol_ver char* state_desc - uint16_t state_reason + uint32_t state_reason char* std_err char* std_in char* std_out @@ -1512,29 +1569,34 @@ cdef extern from "slurm/slurm.h": ctypedef slurm_job_info_t job_info_t - cdef struct priority_factors_object: - char* cluster_name - uint32_t job_id - char* partition - uint32_t user_id + ctypedef struct priority_factors_t: + uint32_t nice double priority_age double priority_assoc double priority_fs double priority_js double priority_part double priority_qos - double direct_prio uint32_t priority_site double* priority_tres uint32_t tres_cnt char** tres_names double* tres_weights - uint32_t nice + + cdef struct priority_factors_object: + char* account + char* cluster_name + double direct_prio + uint32_t job_id + char* partition + priority_factors_t* prio_factors 
+ char* qos + uint32_t user_id ctypedef priority_factors_object priority_factors_object_t cdef struct priority_factors_response_msg: - List priority_factors_list + list_t* priority_factors_list ctypedef priority_factors_response_msg priority_factors_response_msg_t @@ -1553,6 +1615,12 @@ cdef extern from "slurm/slurm.h": ctypedef step_update_request_msg step_update_request_msg_t + cdef struct suspend_exc_update_msg: + char* update_str + update_mode_t mode + + ctypedef suspend_exc_update_msg suspend_exc_update_msg_t + ctypedef struct slurm_step_layout_req_t: char* node_list uint16_t* cpus_per_node @@ -1700,7 +1768,6 @@ cdef extern from "slurm/slurm.h": char* remote_error_filename char* remote_input_filename slurm_step_io_fds_t local_fds - uint32_t gid bool multi_prog bool no_alloc uint32_t slurmd_debug @@ -1800,6 +1867,7 @@ cdef extern from "slurm/slurm.h": uint32_t array_task_id char* cluster char* container + char* container_id uint32_t cpu_freq_min uint32_t cpu_freq_max uint32_t cpu_freq_gov @@ -1814,7 +1882,6 @@ cdef extern from "slurm/slurm.h": char* partition char* resv_ports time_t run_time - dynamic_plugin_data_t* select_jobinfo char* srun_host uint32_t srun_pid time_t start_time @@ -1846,7 +1913,7 @@ cdef extern from "slurm/slurm.h": uint32_t pid_cnt ctypedef struct job_step_pids_response_msg_t: - List pid_list + list_t* pid_list slurm_step_id_t step_id ctypedef struct job_step_stat_t: @@ -1856,7 +1923,7 @@ cdef extern from "slurm/slurm.h": job_step_pids_t* step_pids ctypedef struct job_step_stat_response_msg_t: - List stats_list + list_t* stats_list slurm_step_id_t step_id cdef struct node_info: @@ -1899,6 +1966,8 @@ cdef extern from "slurm/slurm.h": char* reason time_t reason_time uint32_t reason_uid + time_t resume_after + char* resv_name dynamic_plugin_data_t* select_nodeinfo time_t slurmd_start_time uint16_t sockets @@ -2003,9 +2072,10 @@ cdef extern from "slurm/slurm.h": char* deny_qos uint16_t flags uint32_t grace_time - List job_defaults_list + 
list_t* job_defaults_list char* job_defaults_str uint32_t max_cpus_per_node + uint32_t max_cpus_per_socket uint64_t max_mem_per_cpu uint32_t max_nodes uint16_t max_share @@ -2048,6 +2118,8 @@ cdef extern from "slurm/slurm.h": uint32_t env_size char** environment uint32_t error_code + gid_t gid + char* group_name char* job_submit_user_msg slurm_addr_t* node_addr uint32_t node_cnt @@ -2061,7 +2133,9 @@ cdef extern from "slurm/slurm.h": uint64_t pn_min_memory char* qos char* resv_name - dynamic_plugin_data_t* select_jobinfo + char* tres_per_node + uid_t uid + char* user_name void* working_cluster_rec ctypedef resource_allocation_response_msg resource_allocation_response_msg_t @@ -2078,7 +2152,7 @@ cdef extern from "slurm/slurm.h": char* job_submit_user_msg char* node_list char* part_name - List preemptee_job_id + list_t* preemptee_job_id uint32_t proc_cnt time_t start_time double sys_usage_per @@ -2094,6 +2168,7 @@ cdef extern from "slurm/slurm.h": cdef struct reserve_info: char* accounts char* burst_buffer + char* comment uint32_t core_cnt uint32_t core_spec_cnt resv_core_spec_t* core_spec @@ -2126,6 +2201,7 @@ cdef extern from "slurm/slurm.h": cdef struct resv_desc_msg: char* accounts char* burst_buffer + char* comment uint32_t* core_cnt uint32_t duration time_t end_time @@ -2210,6 +2286,7 @@ cdef extern from "slurm/slurm.h": char* fed_params uint32_t first_job_id uint16_t fs_dampening_factor + uint16_t getnameinfo_cache_timeout uint16_t get_env_timeout char* gres_plugins uint16_t group_time @@ -2235,7 +2312,7 @@ cdef extern from "slurm/slurm.h": char* job_container_plugin char* job_credential_private_key char* job_credential_public_certificate - List job_defaults_list + list_t* job_defaults_list uint16_t job_file_append uint16_t job_requeue char* job_submit_plugins @@ -2245,12 +2322,12 @@ cdef extern from "slurm/slurm.h": uint16_t kill_on_bad_exit uint16_t kill_wait char* launch_params - char* launch_type char* licenses uint16_t log_fmt char* mail_domain char* 
mail_prog uint32_t max_array_sz + uint32_t max_batch_requeue uint32_t max_dbd_msgs uint32_t max_job_cnt uint32_t max_job_id @@ -2276,6 +2353,7 @@ cdef extern from "slurm/slurm.h": char* power_plugin uint32_t preempt_exempt_time uint16_t preempt_mode + char* preempt_params char* preempt_type char* prep_params char* prep_plugins @@ -2336,8 +2414,6 @@ cdef extern from "slurm/slurm.h": uint16_t slurmctld_debug char* slurmctld_logfile char* slurmctld_pidfile - char* slurmctld_plugstack - void* slurmctld_plugstack_conf uint32_t slurmctld_port uint16_t slurmctld_port_count char* slurmctld_primary_off_prog @@ -2359,6 +2435,7 @@ cdef extern from "slurm/slurm.h": char* state_save_location char* suspend_exc_nodes char* suspend_exc_parts + char* suspend_exc_states char* suspend_program uint16_t suspend_rate uint32_t suspend_time @@ -2421,6 +2498,7 @@ cdef extern from "slurm/slurm.h": uint32_t node_state char* reason uint32_t reason_uid + uint32_t resume_after uint32_t weight ctypedef slurm_update_node_msg update_node_msg_t @@ -2540,6 +2618,9 @@ cdef extern from "slurm/slurm.h": uint32_t available uint8_t remote uint32_t reserved + uint32_t last_consumed + uint32_t last_deficit + time_t last_update ctypedef slurm_license_info slurm_license_info_t @@ -2554,19 +2635,20 @@ cdef extern from "slurm/slurm.h": uint32_t job_array_count char** job_array_id uint32_t* error_code + char** err_msg ctypedef struct assoc_mgr_info_msg_t: - List assoc_list - List qos_list + list_t* assoc_list + list_t* qos_list uint32_t tres_cnt char** tres_names - List user_list + list_t* user_list ctypedef struct assoc_mgr_info_request_msg_t: - List acct_list + list_t* acct_list uint32_t flags - List qos_list - List user_list + list_t* qos_list + list_t* user_list cdef struct network_callerid_msg: unsigned char ip_src[16] @@ -2583,27 +2665,27 @@ cdef extern from "slurm/slurm.h": ctypedef void (*_slurm_allocate_resources_blocking_pending_callback_ft)(uint32_t job_id) - resource_allocation_response_msg_t* 
slurm_allocate_resources_blocking(job_desc_msg_t* user_req, time_t timeout, _slurm_allocate_resources_blocking_pending_callback_ft pending_callback) + resource_allocation_response_msg_t* slurm_allocate_resources_blocking(const job_desc_msg_t* user_req, time_t timeout, _slurm_allocate_resources_blocking_pending_callback_ft pending_callback) void slurm_free_resource_allocation_response_msg(resource_allocation_response_msg_t* msg) ctypedef void (*_slurm_allocate_het_job_blocking_pending_callback_ft)(uint32_t job_id) - List slurm_allocate_het_job_blocking(List job_req_list, time_t timeout, _slurm_allocate_het_job_blocking_pending_callback_ft pending_callback) + list_t* slurm_allocate_het_job_blocking(list_t* job_req_list, time_t timeout, _slurm_allocate_het_job_blocking_pending_callback_ft pending_callback) int slurm_allocation_lookup(uint32_t job_id, resource_allocation_response_msg_t** resp) - int slurm_het_job_lookup(uint32_t jobid, List* resp) + int slurm_het_job_lookup(uint32_t jobid, list_t** resp) - char* slurm_read_hostfile(char* filename, int n) + char* slurm_read_hostfile(const char* filename, int n) - allocation_msg_thread_t* slurm_allocation_msg_thr_create(uint16_t* port, slurm_allocation_callbacks_t* callbacks) + allocation_msg_thread_t* slurm_allocation_msg_thr_create(uint16_t* port, const slurm_allocation_callbacks_t* callbacks) void slurm_allocation_msg_thr_destroy(allocation_msg_thread_t* msg_thr) int slurm_submit_batch_job(job_desc_msg_t* job_desc_msg, submit_response_msg_t** slurm_alloc_msg) - int slurm_submit_batch_het_job(List job_req_list, submit_response_msg_t** slurm_alloc_msg) + int slurm_submit_batch_het_job(list_t* job_req_list, submit_response_msg_t** slurm_alloc_msg) void slurm_free_submit_response_response_msg(submit_response_msg_t* msg) @@ -2611,7 +2693,7 @@ cdef extern from "slurm/slurm.h": int slurm_job_will_run(job_desc_msg_t* job_desc_msg) - int slurm_het_job_will_run(List job_req_list) + int slurm_het_job_will_run(list_t* 
job_req_list) int slurm_job_will_run2(job_desc_msg_t* req, will_run_response_msg_t** will_run_resp) @@ -2644,7 +2726,7 @@ cdef extern from "slurm/slurm.h": int slurm_kill_job_step(uint32_t job_id, uint32_t step_id, uint16_t signal) - int slurm_kill_job2(char* job_id, uint16_t signal, uint16_t flags, char* sibling) + int slurm_kill_job2(const char* job_id, uint16_t signal, uint16_t flags, const char* sibling) int slurm_signal_job(uint32_t job_id, uint16_t signal) @@ -2656,9 +2738,9 @@ cdef extern from "slurm/slurm.h": void slurm_step_launch_params_t_init(slurm_step_launch_params_t* ptr) - int slurm_step_launch(slurm_step_ctx_t* ctx, slurm_step_launch_params_t* params, slurm_step_launch_callbacks_t* callbacks) + int slurm_step_launch(slurm_step_ctx_t* ctx, const slurm_step_launch_params_t* params, const slurm_step_launch_callbacks_t* callbacks) - int slurm_step_launch_add(slurm_step_ctx_t* ctx, slurm_step_ctx_t* first_ctx, slurm_step_launch_params_t* params, char* node_list) + int slurm_step_launch_add(slurm_step_ctx_t* ctx, slurm_step_ctx_t* first_ctx, const slurm_step_launch_params_t* params, char* node_list) int slurm_step_launch_wait_start(slurm_step_ctx_t* ctx) @@ -2698,11 +2780,11 @@ cdef extern from "slurm/slurm.h": int slurm_job_cpus_allocated_on_node_id(job_resources_t* job_resrcs_ptr, int node_id) - int slurm_job_cpus_allocated_on_node(job_resources_t* job_resrcs_ptr, char* node_name) + int slurm_job_cpus_allocated_on_node(job_resources_t* job_resrcs_ptr, const char* node_name) int slurm_job_cpus_allocated_str_on_node_id(char* cpus, size_t cpus_len, job_resources_t* job_resrcs_ptr, int node_id) - int slurm_job_cpus_allocated_str_on_node(char* cpus, size_t cpus_len, job_resources_t* job_resrcs_ptr, char* node_name) + int slurm_job_cpus_allocated_str_on_node(char* cpus, size_t cpus_len, job_resources_t* job_resrcs_ptr, const char* node_name) void slurm_free_job_info_msg(job_info_msg_t* job_buffer_ptr) @@ -2722,7 +2804,7 @@ cdef extern from "slurm/slurm.h": 
int slurm_load_job(job_info_msg_t** resp, uint32_t job_id, uint16_t show_flags) - int slurm_load_job_prio(priority_factors_response_msg_t** factors_resp, List job_id_list, char* partitions, List uid_list, uint16_t show_flags) + int slurm_load_job_prio(priority_factors_response_msg_t** factors_resp, uint16_t show_flags) int slurm_load_job_user(job_info_msg_t** job_info_msg_pptr, uint32_t user_id, uint16_t show_flags) @@ -2746,6 +2828,8 @@ cdef extern from "slurm/slurm.h": int slurm_get_job_steps(time_t update_time, uint32_t job_id, uint32_t step_id, job_step_info_response_msg_t** step_response_pptr, uint16_t show_flags) + int slurm_find_step_ids_by_container_id(uint16_t show_flags, uid_t uid, const char* container_id, list_t* steps) + void slurm_free_job_step_info_response_msg(job_step_info_response_msg_t* msg) void slurm_print_job_step_info_msg(FILE* out, job_step_info_response_msg_t* job_step_info_msg_ptr, int one_liner) @@ -2864,8 +2948,16 @@ cdef extern from "slurm/slurm.h": void slurm_free_reservation_info_msg(reserve_info_msg_t* resv_info_ptr) + ctypedef struct controller_ping_t: + char* hostname + bool pinged + long latency + int offset + int slurm_ping(int dest) + controller_ping_t* ping_all_controllers() + int slurm_reconfigure() int slurm_shutdown(uint16_t options) @@ -2874,12 +2966,22 @@ cdef extern from "slurm/slurm.h": int slurm_set_debugflags(uint64_t debug_flags_plus, uint64_t debug_flags_minus) + int slurm_set_slurmd_debug_flags(char* node_list, uint64_t debug_flags_plus, uint64_t debug_flags_minus) + + int slurm_set_slurmd_debug_level(char* node_list, uint32_t debug_level) + int slurm_set_debug_level(uint32_t debug_level) int slurm_set_schedlog_level(uint32_t schedlog_level) int slurm_set_fs_dampeningfactor(uint16_t factor) + int slurm_update_suspend_exc_nodes(char* nodes, update_mode_t mode) + + int slurm_update_suspend_exc_parts(char* parts, update_mode_t mode) + + int slurm_update_suspend_exc_states(char* states, update_mode_t mode) + int 
slurm_suspend(uint32_t job_id) int slurm_suspend2(char* job_id, job_array_resp_msg_t** resp) @@ -2995,9 +3097,10 @@ cdef extern from "slurm/slurm.h": char* failed_lines uint32_t* jobids uint32_t jobids_count + char* job_submit_user_msg uint32_t return_code - crontab_update_response_msg_t* slurm_update_crontab(uid_t uid, gid_t gid, char* crontab, List jobs) + crontab_update_response_msg_t* slurm_update_crontab(uid_t uid, gid_t gid, char* crontab, list_t* jobs) int slurm_remove_crontab(uid_t uid, gid_t gid) @@ -3021,6 +3124,7 @@ cdef extern from "slurm/slurmdb.h": uint32_t SLURMDB_RES_FLAG_NOTSET uint32_t SLURMDB_RES_FLAG_ADD uint32_t SLURMDB_RES_FLAG_REMOVE + uint8_t SLURMDB_RES_FLAG_ABSOLUTE uint32_t FEDERATION_FLAG_BASE uint32_t FEDERATION_FLAG_NOTSET uint32_t FEDERATION_FLAG_ADD @@ -3055,7 +3159,7 @@ cdef extern from "slurm/slurmdb.h": uint32_t SLURMDB_FS_USE_PARENT uint16_t SLURMDB_CLASSIFIED_FLAG uint8_t SLURMDB_CLASS_BASE - uint8_t CLUSTER_FLAG_A1 + uint8_t CLUSTER_FLAG_REGISTER uint8_t CLUSTER_FLAG_A2 uint8_t CLUSTER_FLAG_A3 uint8_t CLUSTER_FLAG_A4 @@ -3069,7 +3173,10 @@ cdef extern from "slurm/slurmdb.h": uint16_t CLUSTER_FLAG_FED uint16_t CLUSTER_FLAG_EXT uint8_t ASSOC_FLAG_DELETED + uint8_t ASSOC_FLAG_NO_UPDATE uint8_t SLURMDB_EVENT_COND_OPEN + uint8_t DB_CONN_FLAG_CLUSTER_DEL + uint8_t DB_CONN_FLAG_ROLLBACK cdef extern from "slurm/slurmdb.h": @@ -3286,6 +3393,7 @@ cdef extern from "slurm/slurmdb.h": slurmdb_assoc_rec* assoc_next_id slurmdb_bf_usage_t* bf_usage char* cluster + char* comment uint32_t def_qos_id uint16_t flags uint32_t grp_jobs @@ -3384,14 +3492,14 @@ cdef extern from "slurm/slurmdb.h": List accounting_list uint16_t classification time_t comm_fail_time - # slurm_addr_t control_addr incomplete type complaint + slurm_addr_t control_addr char* control_host uint32_t control_port uint16_t dimensions int* dim_size slurmdb_cluster_fed_t fed uint32_t flags - # pthread_mutex_t lock incomplete type complaint + pthread_mutex_t lock char* name char* 
nodes uint32_t plugin_id_select @@ -3412,7 +3520,7 @@ cdef extern from "slurm/slurmdb.h": ctypedef struct slurmdb_clus_res_rec_t: char* cluster - uint16_t percent_allowed + uint32_t allowed ctypedef struct slurmdb_coord_rec_t: char* name @@ -3476,6 +3584,8 @@ cdef extern from "slurm/slurmdb.h": time_t end char* env uint32_t exitcode + char* extra + char* failed_node uint32_t flags void* first_step_ptr uint32_t gid @@ -3484,6 +3594,7 @@ cdef extern from "slurm/slurmdb.h": uint32_t jobid char* jobname uint32_t lft + char* licenses char* mcs_label char* nodes char* partition @@ -3610,6 +3721,7 @@ cdef extern from "slurm/slurmdb.h": ctypedef struct slurmdb_reservation_rec_t: char* assocs char* cluster + char* comment uint64_t flags uint32_t id char* name @@ -3653,6 +3765,7 @@ cdef extern from "slurm/slurmdb.h": uint32_t user_cpu_usec ctypedef struct slurmdb_res_cond_t: + list_t* allowed_list List cluster_list List description_list uint32_t flags @@ -3660,22 +3773,23 @@ cdef extern from "slurm/slurmdb.h": List id_list List manager_list List name_list - List percent_list List server_list List type_list uint16_t with_deleted uint16_t with_clusters ctypedef struct slurmdb_res_rec_t: + uint32_t allocated + uint32_t last_consumed List clus_res_list slurmdb_clus_res_rec_t* clus_res_rec uint32_t count char* description uint32_t flags uint32_t id + time_t last_update char* manager char* name - uint16_t percent_used char* server uint32_t type @@ -3781,6 +3895,7 @@ cdef extern from "slurm/slurmdb.h": ctypedef struct slurmdb_hierarchical_rec_t: slurmdb_assoc_rec_t* assoc + char* key char* sort_name List children @@ -3924,7 +4039,7 @@ cdef extern from "slurm/slurmdb.h": int slurmdb_jobs_fix_runaway(void* db_conn, List jobs) - int slurmdb_jobcomp_init(char* jobcomp_loc) + int slurmdb_jobcomp_init() int slurmdb_jobcomp_fini() @@ -4135,3 +4250,4 @@ cdef extern from "slurm/slurmdb.h": List slurmdb_wckeys_modify(void* db_conn, slurmdb_wckey_cond_t* wckey_cond, slurmdb_wckey_rec_t* 
wckey) List slurmdb_wckeys_remove(void* db_conn, slurmdb_wckey_cond_t* wckey_cond) + diff --git a/setup.cfg b/setup.cfg index a7d6399b..17a6e9f3 100644 --- a/setup.cfg +++ b/setup.cfg @@ -5,7 +5,7 @@ doc_files = README.md doc/ examples/ build_requires = python3-devel >= 3.6 - slurm-devel >= 22.05.0 + slurm-devel >= 23.02.0 requires = slurm use_bzip2 = 1 diff --git a/setup.py b/setup.py index 17e25bb5..796faa6a 100644 --- a/setup.py +++ b/setup.py @@ -17,8 +17,8 @@ # Keep in sync with pyproject.toml CYTHON_VERSION_MIN = "0.29.30" -SLURM_RELEASE = "22.5" -PYSLURM_PATCH_RELEASE = "1" +SLURM_RELEASE = "23.2" +PYSLURM_PATCH_RELEASE = "0" SLURM_SHARED_LIB = "libslurm.so" CURRENT_DIR = pathlib.Path(__file__).parent From 23d436e36ccc230555640d7bdbbede762f0c8684 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Wed, 12 Apr 2023 19:48:15 +0200 Subject: [PATCH 14/48] Revert usage of return type hints (#281) Mostly because this pretty much destroys syntax highlighting on github And use the return type within the "Returns" section in parenthesis. It must be with parenthesis to get it properly documented in mkdocs for now, see https://github.com/mkdocstrings/griffe/issues/137 --- pyslurm/pyslurm.pyx | 368 ++++++++++++++++++++++---------------------- 1 file changed, 184 insertions(+), 184 deletions(-) diff --git a/pyslurm/pyslurm.pyx b/pyslurm/pyslurm.pyx index bfcae7a7..adbed03e 100644 --- a/pyslurm/pyslurm.pyx +++ b/pyslurm/pyslurm.pyx @@ -272,11 +272,11 @@ ctypedef struct config_key_pair_t: # -def get_controllers() -> tuple: +def get_controllers(): """Get information about slurm controllers. 
Returns: - Name of primary controller, Name of backup controllers + (tuple): Name of primary controller, Name of backup controllers """ cdef: slurm.slurm_conf_t *slurm_ctl_conf_ptr = NULL @@ -303,14 +303,14 @@ def get_controllers() -> tuple: return control_machs -def is_controller(Host=None) -> str: +def is_controller(Host=None): """Return slurm controller status for host. Args: Host (str): Name of host to check Returns: - None, "primary" or "backup" + (str): None, "primary" or "backup" """ control_machs = get_controllers() if not Host: @@ -325,11 +325,11 @@ def is_controller(Host=None) -> str: return 'backup' -def slurm_api_version() -> tuple: +def slurm_api_version(): """Return the slurm API version number. Returns: - A tuple of version_major, version_minor, version_micro + (tuple): A tuple of version_major, version_minor, version_micro """ cdef long version = slurm.SLURM_VERSION_NUMBER @@ -338,11 +338,11 @@ def slurm_api_version() -> tuple: SLURM_VERSION_MICRO(version)) -def slurm_load_slurmd_status() -> str: +def slurm_load_slurmd_status(): """Issue RPC to get and load the status of Slurmd daemon. Returns: - Slurmd information + (str): Slurmd information """ cdef: dict Status = {}, Status_dict = {} @@ -397,11 +397,11 @@ def slurm_fini(): # Slurm Config Class # -def get_private_data_list(data) -> list: +def get_private_data_list(data): """Retrieve the enciphered Private Data configuration. Returns: - Private data + (list): Private data """ result = [] @@ -436,30 +436,30 @@ cdef class config: def __dealloc__(self): self.__free() - def lastUpdate(self) -> int: + def lastUpdate(self): """Get the time (epoch seconds) the retrieved data was updated. Returns: - Epoch seconds + (int): Epoch seconds """ return self._lastUpdate - def ids(self) -> dict: + def ids(self): """Return the config IDs from retrieved data. 
Returns: - Dictionary of config key IDs + (dict): Dictionary of config key IDs """ return self.__ConfigDict.keys() - def find_id(self, char *keyID='') -> dict: + def find_id(self, char *keyID=''): """Retrieve config ID data. Args: keyID (str): Config key string to search Returns: - Dictionary of values for given config key + (dict): Dictionary of values for given config key """ return self.__ConfigDict.get(keyID, {}) @@ -494,11 +494,11 @@ cdef class config: self.__Config_ptr = slurm_ctl_conf_ptr return errCode - def key_pairs(self) -> dict: + def key_pairs(self): """Return a dict of the slurm control data as key pairs. Returns: - Dictionary of slurm key-pair values + (dict): Dictionary of slurm key-pair values """ cdef: void *ret_list = NULL @@ -533,11 +533,11 @@ cdef class config: return keyDict - def get(self) -> dict: + def get(self): """Return the slurm control configuration information. Returns: - Configuration data + (dict): Configuration data """ self.__load() self.__get() @@ -828,19 +828,19 @@ cdef class partition: def __dealloc__(self): pass - def lastUpdate(self) -> int: + def lastUpdate(self): """Return time (epoch seconds) the partition data was updated. Returns: - Epoch seconds + (int): Epoch seconds """ return self._lastUpdate - def ids(self) -> dict: + def ids(self): """Return the partition IDs from retrieved data. Returns: - Dictionary of partition IDs + (dict): Dictionary of partition IDs """ cdef: int rc @@ -864,18 +864,18 @@ cdef class partition: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - def find_id(self, partID) -> dict: + def find_id(self, partID): """Get partition information for a given partition. 
Args: partID (str): Partition key string to search Returns: - Dictionary of values for given partition + (dict): Dictionary of values for given partition """ return self.get().get(partID) - def find(self, name='', val='') -> list: + def find(self, name='', val=''): """Search for a property and associated value in the retrieved partition data. Args: @@ -883,7 +883,7 @@ cdef class partition: val (str): value string to match Returns: - List of IDs that match + (list): List of IDs that match """ cdef: list retList = [] @@ -921,14 +921,15 @@ cdef class partition: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - def delete(self, PartID) -> int: + def delete(self, PartID): """Delete a give slurm partition. Args: PartID (str): Name of slurm partition Returns: - 0 for success else set the slurm error code as appropriately. + (int): 0 for success else set the slurm error code as + appropriately. """ cdef: slurm.delete_part_msg_t part_msg @@ -946,11 +947,11 @@ cdef class partition: return errCode - def get(self) -> dict: + def get(self): """Get all slurm partition information Returns: - Dictionary of dictionaries whose key is the partition name. + (dict): Dictionary of dictionaries whose key is the partition name. """ cdef: int rc @@ -1124,7 +1125,7 @@ cdef class partition: raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - def update(self, dict Partition_dict) -> int: + def update(self, dict Partition_dict): """Update a slurm partition. Args: @@ -1132,13 +1133,13 @@ cdef class partition: one is created by create_partition_dict Returns: - 0 for success, -1 for error, and the slurm error code is set + (int): 0 for success, -1 for error, and the slurm error code is set appropriately. """ cdef int errCode = slurm_update_partition(Partition_dict) return errCode - def create(self, dict Partition_dict) -> int: + def create(self, dict Partition_dict): """Create a slurm partition. 
Args: @@ -1146,19 +1147,19 @@ cdef class partition: one can be created by create_partition_dict Returns: - 0 for success or -1 for error, and the slurm error code is set - appropriately. + (int): 0 for success or -1 for error, and the slurm error code is + set appropriately. """ cdef int errCode = slurm_create_partition(Partition_dict) return errCode -def create_partition_dict() -> dict: +def create_partition_dict(): """Returns a dictionary that can be populated by the user and used for the update_partition and create_partition calls. Returns: - Empty reservation dictionary + (dict): Empty reservation dictionary """ return { 'Alternate': None, @@ -1179,7 +1180,7 @@ def create_partition_dict() -> dict: } -def slurm_create_partition(dict partition_dict) -> int: +def slurm_create_partition(dict partition_dict): """Create a slurm partition. Args: @@ -1187,7 +1188,7 @@ def slurm_create_partition(dict partition_dict) -> int: can be created by create_partition_dict Returns: - 0 for success or -1 for error, and the slurm error code is set + (int): 0 for success or -1 for error, and the slurm error code is set appropriately. """ cdef: @@ -1212,7 +1213,7 @@ def slurm_create_partition(dict partition_dict) -> int: return errCode -def slurm_update_partition(dict partition_dict) -> int: +def slurm_update_partition(dict partition_dict): """Update a slurm partition. Args: @@ -1220,7 +1221,7 @@ def slurm_update_partition(dict partition_dict) -> int: is created by create_partition_dict Returns: - 0 for success, -1 for error, and the slurm error code is set + (int): 0 for success, -1 for error, and the slurm error code is set appropriately. """ cdef: @@ -1279,14 +1280,14 @@ def slurm_update_partition(dict partition_dict) -> int: return errCode -def slurm_delete_partition(PartID) -> int: +def slurm_delete_partition(PartID): """Delete a slurm partition. Args: PartID (str): Name of slurm partition Returns: - 0 for success else set the slurm error code as appropriately. 
+ (int): 0 for success else set the slurm error code as appropriately. """ cdef: slurm.delete_part_msg_t part_msg @@ -1758,27 +1759,27 @@ cdef class job: def __dealloc__(self): pass - def lastUpdate(self) -> int: + def lastUpdate(self): """Get the time (epoch seconds) the job data was updated. Returns: - Epoch seconds + (int): Epoch seconds """ return self._lastUpdate - def lastBackfill(self) -> int: + def lastBackfill(self): """Get the time (epoch seconds) of last backfilling run. Returns: - Epoch seconds + (int): Epoch seconds """ return self._lastBackfill - def ids(self) -> dict: + def ids(self): """Return the job IDs from retrieved data. Returns: - Dictionary of job IDs + (dict): Dictionary of job IDs """ cdef: int rc @@ -1799,7 +1800,7 @@ cdef class job: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - def find(self, name='', val='') -> list: + def find(self, name='', val=''): """Search for a property and associated value in the retrieved job data. Args: @@ -1807,7 +1808,7 @@ cdef class job: val (str): value string to match Returns: - List of IDs that match + (list): List of IDs that match """ cdef: list retList = [] @@ -1854,7 +1855,7 @@ cdef class job: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - def find_id(self, jobid) -> list: + def find_id(self, jobid): """Retrieve job ID data. This method accepts both string and integer formats of the jobid. @@ -1866,12 +1867,12 @@ cdef class job: jobid (str): Job id key string to search Returns: - List of dictionary of values for given job id + (list): List of dictionary of values for given job id """ self._load_single_job(jobid) return list(self.get_job_ptr().values()) - def find_user(self, user) -> dict: + def find_user(self, user): """Retrieve a user's job data. 
This method calls slurm_load_job_user to get all job_table records @@ -1881,7 +1882,7 @@ cdef class job: user (str): User string to search Returns: - Dictionary of values for all user's jobs + (dict): Dictionary of values for all user's jobs """ cdef: int apiError @@ -1904,15 +1905,15 @@ cdef class job: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - def get(self) -> dict: + def get(self): """Get all slurm jobs information. This method calls slurm_load_jobs to get job_table records for all jobs Returns: - Data where key is the job name, each entry contains a dictionary - of job attributes + (dict): Data where key is the job name, each entry contains a + dictionary of job attributes """ cdef: int apiError @@ -2318,14 +2319,14 @@ cdef class job: return cpus_list - def __unrange(self, bit_str) -> list: + def __unrange(self, bit_str): """converts a string describing a bitmap (from slurm_job_cpus_allocated_str_on_node()) to a list. Args: bit_str (str): string describing a bitmap (e.g. "0-30,45,50-60") Returns: - List referring to bitmap (empty if not succesful) + (list): List referring to bitmap (empty if not succesful) """ r_list = [] @@ -2369,7 +2370,7 @@ cdef class job: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - def slurm_job_batch_script(self, jobid) -> str: + def slurm_job_batch_script(self, jobid): """Return the contents of the batch-script for a Job. The string returned also includes all the "\\n" characters (new-line). @@ -2379,7 +2380,7 @@ cdef class job: be retrieved. Returns: - The content of the batch script. + (str): The content of the batch script. """ # This reimplements the slurm_job_batch_script API call. 
Otherwise we # would have to parse the FILE* ptr we get from it back into a @@ -2849,7 +2850,7 @@ cdef class job: req.wait_all_nodes = slurm.NO_VAL return rc - def submit_batch_job(self, job_opts) -> int: + def submit_batch_job(self, job_opts): """Submit batch job. Make sure options match sbatch command line opts and not struct member @@ -2859,7 +2860,7 @@ cdef class job: job_opts (dict): Job information. Returns: - The job id of the submitted job. + (int): The job id of the submitted job. """ cdef: slurm.job_desc_msg_t desc @@ -3018,7 +3019,7 @@ cdef class job: #return "Submitted batch job %s" % job_id return job_id - def wait_finished(self, jobid) -> int: + def wait_finished(self, jobid): """Block until the job given by the jobid finishes. This works for single jobs, as well as job arrays. @@ -3029,7 +3030,7 @@ cdef class job: jobid (the same as given by squeue) Returns: - The exit code of the slurm job. + (int): The exit code of the slurm job. """ exit_status = -9999 complete = False @@ -3159,25 +3160,25 @@ class SlurmError(Exception): # -def slurm_get_errno() -> int: +def slurm_get_errno(): """Return the slurm error as set by a slurm API call. Returns: - Current slurm error number + (int): Current slurm error number """ cdef int errNum = slurm.slurm_get_errno() return errNum -def slurm_strerror(int Errno=0) -> str: +def slurm_strerror(int Errno=0): """Return slurm error message represented by a given slurm error number. Args: Errno (int): slurm error number. Returns: - slurm error string + (str): slurm error string """ cdef char* errMsg = slurm.slurm_strerror(Errno) @@ -3230,19 +3231,19 @@ cdef class node: def __dealloc__(self): pass - def lastUpdate(self) -> int: + def lastUpdate(self): """Return last time (epoch seconds) the node data was updated. Returns: - Epoch seconds + (int): Epoch seconds """ return self._lastUpdate - def ids(self) -> dict: + def ids(self): """Return the node IDs from retrieved data. 
Returns: - Dictionary of node IDs + (dict): Dictionary of node IDs """ cdef: int rc @@ -3263,22 +3264,22 @@ cdef class node: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - def find_id(self, nodeID) -> dict: + def find_id(self, nodeID): """Get node information for a given node. Args: nodeID (str): Node key string to search Returns: - Dictionary of values for given node + (dict): Dictionary of values for given node """ return list(self.get_node(nodeID).values())[0] - def get(self) -> dict: + def get(self): """Get all slurm node information. Returns: - Dictionary of dictionaries whose key is the node name. + (dict): Dictionary of dictionaries whose key is the node name. """ return self.get_node(None) @@ -3286,14 +3287,14 @@ cdef class node: if gres_str: return re.split(r',(?![^(]*\))', gres_str) - def get_node(self, nodeID) -> dict: + def get_node(self, nodeID): """Get single slurm node information. Args: nodeID (str): Node key string to search. Default NULL. Returns: - Dictionary of node info data. + (dict): Dictionary of node info data. """ cdef: int rc @@ -3516,7 +3517,7 @@ cdef class node: return self._NodeDict - def update(self, dict node_dict) -> int: + def update(self, dict node_dict): """Update slurm node information. Args: @@ -3524,8 +3525,8 @@ cdef class node: created by create_node_dict Returns: - 0 for success or -1 for error, and the slurm error code is set - appropriately. + (int): 0 for success or -1 for error, and the slurm error code is + set appropriately. """ return slurm_update_node(node_dict) @@ -3551,7 +3552,7 @@ cdef class node: raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) -def slurm_update_node(dict node_dict) -> int: +def slurm_update_node(dict node_dict): """Update slurm node information. 
Args: @@ -3559,7 +3560,7 @@ def slurm_update_node(dict node_dict) -> int: by create_node_dict Returns: - 0 for success or -1 for error, and the slurm error code is set + (int): 0 for success or -1 for error, and the slurm error code is set appropriately. """ cdef: @@ -3605,14 +3606,14 @@ def slurm_update_node(dict node_dict) -> int: return errCode -def create_node_dict() -> dict: +def create_node_dict(): """Return a an update_node dictionary This dictionary can be populated by the user and used for the update_node call. Returns: - Empty node dictionary + (dict): Empty node dictionary """ return { 'node_names': None, @@ -3654,11 +3655,11 @@ cdef class jobstep: self._ShowFlags = 0 self._JobStepDict = {} - def lastUpdate(self) -> int: + def lastUpdate(self): """Get the time (epoch seconds) the jobstep data was updated. Returns: - Epoch seconds + (int): Epoch seconds """ return self._lastUpdate @@ -3686,11 +3687,11 @@ cdef class jobstep: return retDict - def get(self) -> dict: + def get(self): """Get slurm jobstep information. Returns: - Data whose key is the jobstep ID. + (dict): Data whose key is the jobstep ID. """ self.__get() @@ -3818,7 +3819,7 @@ cdef class jobstep: self._JobStepDict = Steps - def layout(self, uint32_t JobID=0, uint32_t StepID=0) -> list: + def layout(self, uint32_t JobID=0, uint32_t StepID=0): """Get the slurm job step layout from a given job and step id. Args: @@ -3826,7 +3827,7 @@ cdef class jobstep: StepID (int): The id of the job step. Returns: - List of job step layout. + (list): List of job step layout. """ cdef: slurm.slurm_step_id_t step_id @@ -3909,14 +3910,14 @@ cdef class hostlist: def count(self): return slurm.slurm_hostlist_count(self.hl) - def get_list(self) -> list: + def get_list(self): """Get the list of hostnames composing the hostlist. For example with a hostlist created with "tux[1-3]" -> [ 'tux1', tux2', 'tux3' ]. Returns: - The list of hostnames in case of success or None on error. 
+ (list): The list of hostnames in case of success or None on error. """ cdef: slurm.hostlist_t hlist = NULL @@ -4001,15 +4002,15 @@ cdef class hostlist: cdef class trigger: - def set(self, dict trigger_dict) -> int: + def set(self, dict trigger_dict): """Set or create a slurm trigger. Args: trigger_dict (dict): A populated dictionary of trigger information Returns: - 0 for success or -1 for error, and the slurm error code is set - appropriately. + (int): 0 for success or -1 for error, and the slurm error code is + set appropriately. """ cdef: slurm.trigger_info_t trigger_set @@ -4080,11 +4081,11 @@ cdef class trigger: return 0 - def get(self) -> dict: + def get(self): """Get the information on slurm triggers. Returns: - Dictionary, where keys are the trigger IDs + (dict): Dictionary, where keys are the trigger IDs """ cdef: slurm.trigger_info_msg_t *trigger_get = NULL @@ -4111,7 +4112,7 @@ cdef class trigger: return Triggers - def clear(self, TriggerID=0, UserID=slurm.NO_VAL, ID=0) -> int: + def clear(self, TriggerID=0, UserID=slurm.NO_VAL, ID=0): """Clear or remove a slurm trigger. Args: @@ -4120,7 +4121,7 @@ cdef class trigger: ID (str): Job Identifier Returns: - 0 for success or a slurm error code + (int): 0 for success or a slurm error code """ cdef: slurm.trigger_info_t trigger_clear @@ -4168,34 +4169,34 @@ cdef class reservation: def __dealloc__(self): self.__free() - def lastUpdate(self) -> int: + def lastUpdate(self): """Get the time (epoch seconds) the reservation data was updated. Returns: - epoch seconds + (int): epoch seconds """ return self._lastUpdate - def ids(self) -> dict: + def ids(self): """Return a list of reservation IDs from retrieved data. Returns: - Dictionary of reservation IDs + (dict): Dictionary of reservation IDs """ return self._ResDict.keys() - def find_id(self, resID) -> dict: + def find_id(self, resID): """Retrieve reservation ID data. 
Args: resID (str): Reservation key string to search Returns: - Dictionary of values for given reservation key + (dict): Dictionary of values for given reservation key """ return self._ResDict.get(resID, {}) - def find(self, name='', val='') -> list: + def find(self, name='', val=''): """Search for property and associated value in reservation data. Args: @@ -4203,7 +4204,7 @@ cdef class reservation: val (str): value string to match Returns: - List of IDs that match + (list): List of IDs that match """ # [ key for key, value in self._ResDict.items() if self._ResDict[key]['state'] == 'error'] @@ -4253,11 +4254,11 @@ cdef class reservation: if self._Res_ptr is not NULL: slurm.slurm_free_reservation_info_msg(self._Res_ptr) - def get(self) -> dict: + def get(self): """Get slurm reservation information. Returns: - Data whose key is the Reservation ID + (dict): Data whose key is the Reservation ID """ self.load() self.__get() @@ -4297,36 +4298,36 @@ cdef class reservation: self._ResDict = Reservations - def create(self, dict reservation_dict={}) -> int: + def create(self, dict reservation_dict={}): """Create slurm reservation. Args: reservation_dict (dict): Reservation information Returns: - 0 for success or a slurm error code + (int): 0 for success or a slurm error code """ return slurm_create_reservation(reservation_dict) - def delete(self, ResID) -> int: + def delete(self, ResID): """Delete slurm reservation. Args: ResID (int): ID of the reservation to delete Returns: - 0 for success or a slurm error code + (int): 0 for success or a slurm error code """ return slurm_delete_reservation(ResID) - def update(self, dict reservation_dict={}) -> int: + def update(self, dict reservation_dict={}): """Update a slurm reservation attributes. 
Args: reservation_dict (dict): Reservation information Returns: - 0 for success or -1 for error and slurm error code is set + (int): 0 for success or -1 for error and slurm error code is set """ return slurm_update_reservation(reservation_dict) @@ -4345,7 +4346,7 @@ cdef class reservation: # -def slurm_create_reservation(dict reservation_dict={}) -> str: +def slurm_create_reservation(dict reservation_dict={}): """Create a slurm reservation. Args: @@ -4353,7 +4354,7 @@ def slurm_create_reservation(dict reservation_dict={}) -> str: one is created by create_reservation_dict Returns: - The name of the reservation created. + (str): The name of the reservation created. """ cdef: slurm.resv_desc_msg_t resv_msg @@ -4446,7 +4447,7 @@ def slurm_create_reservation(dict reservation_dict={}) -> str: return resID -def slurm_update_reservation(dict reservation_dict={}) -> int: +def slurm_update_reservation(dict reservation_dict={}): """Update a slurm reservation. Args: @@ -4454,7 +4455,7 @@ def slurm_update_reservation(dict reservation_dict={}) -> int: one is created by create_reservation_dict Returns: - 0 for success or -1 for error, and the slurm error code is set + (int): 0 for success or -1 for error, and the slurm error code is set appropriately. """ cdef: @@ -4542,14 +4543,14 @@ def slurm_update_reservation(dict reservation_dict={}) -> int: return errCode -def slurm_delete_reservation(ResID) -> int: +def slurm_delete_reservation(ResID): """Delete a slurm reservation. Args: ResID (str): Reservation Identifier Returns: - 0 for success or -1 for error, and the slurm error code is set + (int): 0 for success or -1 for error, and the slurm error code is set appropriately. """ cdef slurm.reservation_name_msg_t resv_msg @@ -4570,14 +4571,14 @@ def slurm_delete_reservation(ResID) -> int: return errCode -def create_reservation_dict() -> dict: +def create_reservation_dict(): """Create and empty dict for use with create_reservation method. 
Returns a dictionary that can be populated by the user an used for the update_reservation and create_reservation calls. Returns: - Empty Reservation dictionary + (dict): Empty Reservation dictionary """ return { 'start_time': 0, @@ -4613,11 +4614,11 @@ cdef class topology: def __dealloc__(self): self.__free() - def lastUpdate(self) -> int: + def lastUpdate(self): """Get the time (epoch seconds) the retrieved data was updated. Returns: - Epoch seconds + (int): Epoch seconds """ return self._lastUpdate @@ -4646,11 +4647,11 @@ cdef class topology: return errCode - def get(self) -> dict: + def get(self): """Get slurm topology information. Returns: - Dictionary whose key is the Topology ID + (dict): Dictionary whose key is the Topology ID """ self.__load() self.__get() @@ -4714,11 +4715,11 @@ cdef class statistics: def __dealloc__(self): pass - def get(self) -> dict: + def get(self): """Get slurm statistics information. Returns: - Slurm Controller statistics + (dict): Slurm Controller statistics """ cdef: int errCode @@ -4814,11 +4815,10 @@ cdef class statistics: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - def reset(self) -> int: - """ - Reset scheduling statistics + def reset(self): + """Reset scheduling statistics - This method required root privileges. + This method requires root privileges. """ cdef: int apiError @@ -5142,27 +5142,27 @@ cdef class front_end: return errCode - def lastUpdate(self) -> int: + def lastUpdate(self): """Return last time (sepoch seconds) the node data was updated. Returns: - Epoch seconds + (int): Epoch seconds """ return self._lastUpdate - def ids(self) -> dict: + def ids(self): """Return the node IDs from retrieved data. Returns: - Dictionary of node IDs + (dict): Dictionary of node IDs """ return list(self._FrontEndDict.keys()) - def get(self) -> dict: + def get(self): """Get front end node information. 
Returns: - Dictionary whose key is the Topology ID + (dict): Dictionary whose key is the Topology ID """ self.__load() self.__get() @@ -5252,19 +5252,19 @@ cdef class qos: """ return self._lastUpdate - def ids(self) -> dict: + def ids(self): """Return the QOS IDs from retrieved data. Returns: - Dictionary of QOS IDs + (dict): Dictionary of QOS IDs """ return self._QOSDict.keys() - def get(self) -> dict: + def get(self): """Get slurm QOS information. Returns: - Dictionary whose key is the QOS ID + (dict): Dictionary whose key is the QOS ID """ self.__load() self.__get() @@ -5358,7 +5358,7 @@ cdef class slurmdb_jobs: slurm.slurmdb_connection_close(&self.db_conn) def get(self, jobids=[], userids=[], starttime=0, endtime=0, flags = None, - db_flags = None, clusters = []) -> dict: + db_flags = None, clusters = []): """Get Slurmdb information about some jobs. Input formats for start and end times: @@ -5385,7 +5385,7 @@ cdef class slurmdb_jobs: clusters (list): List of clusters Returns: - Dictionary whose key is the JOBS ID + (dict): Dictionary whose key is the JOBS ID """ cdef: int i = 0 @@ -5639,11 +5639,11 @@ cdef class slurmdb_reservations: else: raise MemoryError() - def get(self) -> dict: + def get(self): """Get slurm reservations information. Returns: - Dictionary whose keys are the reservations ids + (dict): Dictionary whose keys are the reservations ids """ cdef: slurm.List reservation_list @@ -5748,11 +5748,11 @@ cdef class slurmdb_clusters: else: raise MemoryError() - def get(self) -> dict: + def get(self): """Get slurm clusters information. Returns: - Dictionary whose keys are the clusters ids + (dict): Dictionary whose keys are the clusters ids """ cdef: slurm.List clusters_list @@ -5859,11 +5859,11 @@ cdef class slurmdb_events: else: raise MemoryError() - def get(self) -> dict: + def get(self): """Get slurm events information. 
Returns: - Dictionary whose keys are the events ids + (dict): Dictionary whose keys are the events ids """ cdef: slurm.List event_list @@ -5921,7 +5921,7 @@ cdef class slurmdb_reports: slurm.slurmdb_destroy_assoc_cond(self.assoc_cond) def report_cluster_account_by_user(self, starttime=None, - endtime=None) -> dict: + endtime=None): """sreport cluster AccountUtilizationByUser Args: @@ -5929,7 +5929,7 @@ cdef class slurmdb_reports: endtime (Union[str, int]): Start time Returns: - sreport information. + (dict): sreport information. """ cdef: slurm.List slurmdb_report_cluster_list = NULL @@ -6019,11 +6019,11 @@ cdef class slurmdb_reports: # -def get_last_slurm_error() -> int: +def get_last_slurm_error(): """Get and return the last error from a slurm API call. Returns: - Slurm error number and the associated error string + (int): Slurm error number and the associated error string """ rc = slurm.slurm_get_errno() @@ -6074,7 +6074,7 @@ def get_node_use(inx): return slurm.slurm_node_state_string(inx) -def get_trigger_res_type(uint16_t inx) -> str: +def get_trigger_res_type(uint16_t inx): """Returns a string that represents the slurm trigger res type. Args: @@ -6088,7 +6088,7 @@ def get_trigger_res_type(uint16_t inx) -> str: * TRIGGER_RES_TYPE_OTHER 7 Returns: - Trigger reservation state string + (str): Trigger reservation state string """ return __get_trigger_res_type(inx) @@ -6113,7 +6113,7 @@ cdef inline object __get_trigger_res_type(uint16_t ResType): return "%s" % rtype -def get_trigger_type(uint32_t inx) -> str: +def get_trigger_type(uint32_t inx): """Returns a string that represents the state of the slurm trigger. 
Args: @@ -6140,7 +6140,7 @@ def get_trigger_type(uint32_t inx) -> str: * TRIGGER_TYPE_BURST_BUFFER 0x00100000 Returns: - Trigger state string + (str): Trigger state string """ return __get_trigger_type(inx) @@ -6223,14 +6223,14 @@ cdef inline object __get_trigger_type(uint32_t TriggerType): # pass -def get_debug_flags(uint64_t inx) -> str: +def get_debug_flags(uint64_t inx): """Returns a string that represents the slurm debug flags. Args: flags (int): Slurm debug flags Returns: - Debug flag string + (str): Debug flag string """ return debug_flags2str(inx) @@ -6384,14 +6384,14 @@ def get_node_state(uint32_t inx): return slurm.slurm_node_state_string(inx) -def get_rm_partition_state(int inx) -> str: +def get_rm_partition_state(int inx): """Returns a string that represents the partition state. Args: inx (int): Slurm partition state Returns: - Partition state string + (str): Partition state string """ return __get_rm_partition_state(inx) @@ -6433,7 +6433,7 @@ def get_preempt_mode(uint16_t inx): return slurm.slurm_preempt_mode_string(inx) -def get_partition_state(uint16_t inx) -> str: +def get_partition_state(uint16_t inx): """Returns a string that represents the state of the slurm partition. Args: @@ -6444,7 +6444,7 @@ def get_partition_state(uint16_t inx) -> str: * PARTITION_INACTIVE 0x00 Returns: - Partition state string + (str): Partition state string """ state = "" if inx: @@ -6520,7 +6520,7 @@ cdef inline object __get_partition_state(int inx, int extended=0): return "%s" % state -def get_partition_mode(uint16_t flags=0, uint16_t max_share=0) -> str: +def get_partition_mode(uint16_t flags=0, uint16_t max_share=0): """Returns a string represents the state of the partition mode. 
Args: @@ -6528,7 +6528,7 @@ def get_partition_mode(uint16_t flags=0, uint16_t max_share=0) -> str: max_share (int): Max share Returns: - Partition mode string + (dict): Partition mode dict """ return __get_partition_mode(flags, max_share) @@ -6580,7 +6580,7 @@ cdef inline dict __get_partition_mode(uint16_t flags=0, uint16_t max_share=0): return mode -def get_job_state(inx) -> str: +def get_job_state(inx): """Return the state of the slurm job state. Args: @@ -6600,7 +6600,7 @@ def get_job_state(inx) -> str: * JOB_END Returns: - Job state string + (str): Job state string """ try: job_state = slurm.stringOrNone(slurm.slurm_job_state_string(inx), '') @@ -6609,27 +6609,27 @@ def get_job_state(inx) -> str: pass -def get_job_state_reason(inx) -> str: +def get_job_state_reason(inx): """Returns a reason why the slurm job is in a provided state. Args: inx (int): Slurm job state reason Returns: - Reason string + (str): Reason string """ job_reason = slurm.stringOrNone(slurm.slurm_job_reason_string(inx), '') return job_reason -def epoch2date(epochSecs) -> str: +def epoch2date(epochSecs): """Convert epoch secs to a python time string. Args: epochSecs (int): Seconds since epoch Returns: - Date str + (str): Date str """ try: dateTime = p_time.gmtime(epochSecs) @@ -6682,15 +6682,15 @@ cdef class licenses: """Free the memory allocated by load licenses method.""" pass - def lastUpdate(self) -> int: + def lastUpdate(self): """Return last time (epoch seconds) license data was updated. Returns: - Epoch seconds + (int): Epoch seconds """ return self._lastUpdate - def ids(self) -> dict: + def ids(self): """Return the current license names from retrieved license data. This method calls slurm_load_licenses to retrieve license information @@ -6698,7 +6698,7 @@ cdef class licenses: license message buffer. 
Returns: - Dictionary of licenses + (dict): Dictionary of licenses """ cdef: int rc @@ -6722,7 +6722,7 @@ cdef class licenses: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - def get(self) -> dict: + def get(self): """Get full license information from the slurm controller. This method calls slurm_load_licenses to retrieve license information @@ -6730,7 +6730,7 @@ cdef class licenses: license message buffer. Returns: - Dictionary whose key is the license name + (dict): Dictionary whose key is the license name """ cdef: int rc From 3dc607a9a3dce256a16bcda68baf06f3331a5813 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Sun, 30 Apr 2023 14:07:37 +0200 Subject: [PATCH 15/48] Reorganize slurm header definitions into seperate files (#282) - update the pyslurm_bindgen.py tool accordingly --- pyslurm/slurm/__init__.pxd | 14 +- pyslurm/slurm/{header.pxi => slurm.h.pxi} | 1488 +-------------------- pyslurm/slurm/slurm_errno.h.pxi | 336 +++++ pyslurm/slurm/slurm_version.h.pxi | 21 + pyslurm/slurm/slurmdb.h.pxi | 1192 +++++++++++++++++ scripts/pyslurm_bindgen.py | 125 +- 6 files changed, 1728 insertions(+), 1448 deletions(-) rename pyslurm/slurm/{header.pxi => slurm.h.pxi} (66%) create mode 100644 pyslurm/slurm/slurm_errno.h.pxi create mode 100644 pyslurm/slurm/slurm_version.h.pxi create mode 100644 pyslurm/slurm/slurmdb.h.pxi diff --git a/pyslurm/slurm/__init__.pxd b/pyslurm/slurm/__init__.pxd index 3c64b282..f1fbdd6f 100644 --- a/pyslurm/slurm/__init__.pxd +++ b/pyslurm/slurm/__init__.pxd @@ -23,25 +23,30 @@ from libc.string cimport ( memcpy, ) + cdef extern from '' nogil: ctypedef struct sockaddr_storage: pass + cdef extern from '' nogil: ctypedef struct FILE cdef FILE *stdout + cdef extern from '' nogil: ctypedef long time_t double difftime(time_t time1, time_t time2) time_t time(time_t *t) + cdef extern from '' nogil: cdef FILE *PyFile_AsFile(object file) char 
*__FILE__ cdef int __LINE__ char *__FUNCTION__ + cdef extern from '' nogil: ctypedef struct pthread_mutex_t: pass @@ -52,6 +57,7 @@ cdef extern from '' nogil: ctypedef struct pthread_t: pass + cdef extern from *: ctypedef struct slurm_job_credential ctypedef struct switch_jobinfo @@ -67,8 +73,12 @@ cdef extern from *: ctypedef struct slurm_step_ctx_struct ctypedef struct slurm_ctl_conf_t -# Header definitions combined from slurm.h, slurmdb.h and slurm_errno.h -include "header.pxi" + +# Header definitions +include "slurm_version.h.pxi" +include "slurm_errno.h.pxi" +include "slurm.h.pxi" +include "slurmdb.h.pxi" # Any other definitions which are not directly in # the header files, but exported in libslurm.so diff --git a/pyslurm/slurm/header.pxi b/pyslurm/slurm/slurm.h.pxi similarity index 66% rename from pyslurm/slurm/header.pxi rename to pyslurm/slurm/slurm.h.pxi index 2457fcbc..440a31cb 100644 --- a/pyslurm/slurm/header.pxi +++ b/pyslurm/slurm/slurm.h.pxi @@ -1,297 +1,55 @@ -cdef extern from "slurm/slurm_errno.h": - - uint8_t SLURM_SUCCESS - uint8_t ESPANK_SUCCESS - int8_t SLURM_ERROR - -cdef extern from "slurm/slurm_errno.h": - - ctypedef enum slurm_err_t: - SLURM_UNEXPECTED_MSG_ERROR - SLURM_COMMUNICATIONS_CONNECTION_ERROR - SLURM_COMMUNICATIONS_SEND_ERROR - SLURM_COMMUNICATIONS_RECEIVE_ERROR - SLURM_COMMUNICATIONS_SHUTDOWN_ERROR - SLURM_PROTOCOL_VERSION_ERROR - SLURM_PROTOCOL_IO_STREAM_VERSION_ERROR - SLURM_PROTOCOL_AUTHENTICATION_ERROR - SLURM_PROTOCOL_INSANE_MSG_LENGTH - SLURM_MPI_PLUGIN_NAME_INVALID - SLURM_MPI_PLUGIN_PRELAUNCH_SETUP_FAILED - SLURM_PLUGIN_NAME_INVALID - SLURM_UNKNOWN_FORWARD_ADDR - SLURM_COMMUNICATIONS_MISSING_SOCKET_ERROR - SLURMCTLD_COMMUNICATIONS_CONNECTION_ERROR - SLURMCTLD_COMMUNICATIONS_SEND_ERROR - SLURMCTLD_COMMUNICATIONS_RECEIVE_ERROR - SLURMCTLD_COMMUNICATIONS_SHUTDOWN_ERROR - SLURMCTLD_COMMUNICATIONS_BACKOFF - SLURM_NO_CHANGE_IN_DATA - ESLURM_INVALID_PARTITION_NAME - ESLURM_DEFAULT_PARTITION_NOT_SET - ESLURM_ACCESS_DENIED - 
ESLURM_JOB_MISSING_REQUIRED_PARTITION_GROUP - ESLURM_REQUESTED_NODES_NOT_IN_PARTITION - ESLURM_TOO_MANY_REQUESTED_CPUS - ESLURM_INVALID_NODE_COUNT - ESLURM_ERROR_ON_DESC_TO_RECORD_COPY - ESLURM_JOB_MISSING_SIZE_SPECIFICATION - ESLURM_JOB_SCRIPT_MISSING - ESLURM_USER_ID_MISSING - ESLURM_DUPLICATE_JOB_ID - ESLURM_PATHNAME_TOO_LONG - ESLURM_NOT_TOP_PRIORITY - ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE - ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE - ESLURM_NODES_BUSY - ESLURM_INVALID_JOB_ID - ESLURM_INVALID_NODE_NAME - ESLURM_WRITING_TO_FILE - ESLURM_TRANSITION_STATE_NO_UPDATE - ESLURM_ALREADY_DONE - ESLURM_INTERCONNECT_FAILURE - ESLURM_BAD_DIST - ESLURM_JOB_PENDING - ESLURM_BAD_TASK_COUNT - ESLURM_INVALID_JOB_CREDENTIAL - ESLURM_IN_STANDBY_MODE - ESLURM_INVALID_NODE_STATE - ESLURM_INVALID_FEATURE - ESLURM_INVALID_AUTHTYPE_CHANGE - ESLURM_ACTIVE_FEATURE_NOT_SUBSET - ESLURM_INVALID_SCHEDTYPE_CHANGE - ESLURM_INVALID_SELECTTYPE_CHANGE - ESLURM_INVALID_SWITCHTYPE_CHANGE - ESLURM_FRAGMENTATION - ESLURM_NOT_SUPPORTED - ESLURM_DISABLED - ESLURM_DEPENDENCY - ESLURM_BATCH_ONLY - ESLURM_LICENSES_UNAVAILABLE - ESLURM_JOB_HELD - ESLURM_INVALID_CRED_TYPE_CHANGE - ESLURM_INVALID_TASK_MEMORY - ESLURM_INVALID_ACCOUNT - ESLURM_INVALID_PARENT_ACCOUNT - ESLURM_SAME_PARENT_ACCOUNT - ESLURM_INVALID_LICENSES - ESLURM_NEED_RESTART - ESLURM_ACCOUNTING_POLICY - ESLURM_INVALID_TIME_LIMIT - ESLURM_RESERVATION_ACCESS - ESLURM_RESERVATION_INVALID - ESLURM_INVALID_TIME_VALUE - ESLURM_RESERVATION_BUSY - ESLURM_RESERVATION_NOT_USABLE - ESLURM_INVALID_WCKEY - ESLURM_RESERVATION_OVERLAP - ESLURM_PORTS_BUSY - ESLURM_PORTS_INVALID - ESLURM_PROLOG_RUNNING - ESLURM_NO_STEPS - ESLURM_INVALID_QOS - ESLURM_QOS_PREEMPTION_LOOP - ESLURM_NODE_NOT_AVAIL - ESLURM_INVALID_CPU_COUNT - ESLURM_PARTITION_NOT_AVAIL - ESLURM_CIRCULAR_DEPENDENCY - ESLURM_INVALID_GRES - ESLURM_JOB_NOT_PENDING - ESLURM_QOS_THRES - ESLURM_PARTITION_IN_USE - ESLURM_STEP_LIMIT - ESLURM_JOB_SUSPENDED - ESLURM_CAN_NOT_START_IMMEDIATELY - 
ESLURM_INTERCONNECT_BUSY - ESLURM_RESERVATION_EMPTY - ESLURM_INVALID_ARRAY - ESLURM_RESERVATION_NAME_DUP - ESLURM_JOB_STARTED - ESLURM_JOB_FINISHED - ESLURM_JOB_NOT_RUNNING - ESLURM_JOB_NOT_PENDING_NOR_RUNNING - ESLURM_JOB_NOT_SUSPENDED - ESLURM_JOB_NOT_FINISHED - ESLURM_TRIGGER_DUP - ESLURM_INTERNAL - ESLURM_INVALID_BURST_BUFFER_CHANGE - ESLURM_BURST_BUFFER_PERMISSION - ESLURM_BURST_BUFFER_LIMIT - ESLURM_INVALID_BURST_BUFFER_REQUEST - ESLURM_PRIO_RESET_FAIL - ESLURM_CANNOT_MODIFY_CRON_JOB - ESLURM_INVALID_JOB_CONTAINER_CHANGE - ESLURM_CANNOT_CANCEL_CRON_JOB - ESLURM_INVALID_MCS_LABEL - ESLURM_BURST_BUFFER_WAIT - ESLURM_PARTITION_DOWN - ESLURM_DUPLICATE_GRES - ESLURM_JOB_SETTING_DB_INX - ESLURM_RSV_ALREADY_STARTED - ESLURM_SUBMISSIONS_DISABLED - ESLURM_NOT_HET_JOB - ESLURM_NOT_HET_JOB_LEADER - ESLURM_NOT_WHOLE_HET_JOB - ESLURM_CORE_RESERVATION_UPDATE - ESLURM_DUPLICATE_STEP_ID - ESLURM_INVALID_CORE_CNT - ESLURM_X11_NOT_AVAIL - ESLURM_GROUP_ID_MISSING - ESLURM_BATCH_CONSTRAINT - ESLURM_INVALID_TRES - ESLURM_INVALID_TRES_BILLING_WEIGHTS - ESLURM_INVALID_JOB_DEFAULTS - ESLURM_RESERVATION_MAINT - ESLURM_INVALID_GRES_TYPE - ESLURM_REBOOT_IN_PROGRESS - ESLURM_MULTI_KNL_CONSTRAINT - ESLURM_UNSUPPORTED_GRES - ESLURM_INVALID_NICE - ESLURM_INVALID_TIME_MIN_LIMIT - ESLURM_DEFER - ESLURM_CONFIGLESS_DISABLED - ESLURM_ENVIRONMENT_MISSING - ESLURM_RESERVATION_NO_SKIP - ESLURM_RESERVATION_USER_GROUP - ESLURM_PARTITION_ASSOC - ESLURM_IN_STANDBY_USE_BACKUP - ESLURM_BAD_THREAD_PER_CORE - ESLURM_INVALID_PREFER - ESLURM_INSUFFICIENT_GRES - ESLURM_INVALID_CONTAINER_ID - ESLURM_EMPTY_JOB_ID - ESLURM_INVALID_JOB_ID_ZERO - ESLURM_INVALID_JOB_ID_NEGATIVE - ESLURM_INVALID_JOB_ID_TOO_LARGE - ESLURM_INVALID_JOB_ID_NON_NUMERIC - ESLURM_EMPTY_JOB_ARRAY_ID - ESLURM_INVALID_JOB_ARRAY_ID_NEGATIVE - ESLURM_INVALID_JOB_ARRAY_ID_TOO_LARGE - ESLURM_INVALID_JOB_ARRAY_ID_NON_NUMERIC - ESLURM_INVALID_HET_JOB_AND_ARRAY - ESLURM_EMPTY_HET_JOB_COMP - ESLURM_INVALID_HET_JOB_COMP_NEGATIVE - 
ESLURM_INVALID_HET_JOB_COMP_TOO_LARGE - ESLURM_INVALID_HET_JOB_COMP_NON_NUMERIC - ESLURM_EMPTY_STEP_ID - ESLURM_INVALID_STEP_ID_NEGATIVE - ESLURM_INVALID_STEP_ID_TOO_LARGE - ESLURM_INVALID_STEP_ID_NON_NUMERIC - ESLURM_EMPTY_HET_STEP - ESLURM_INVALID_HET_STEP_ZERO - ESLURM_INVALID_HET_STEP_NEGATIVE - ESLURM_INVALID_HET_STEP_TOO_LARGE - ESLURM_INVALID_HET_STEP_NON_NUMERIC - ESLURM_INVALID_HET_STEP_JOB - ESLURM_JOB_TIMEOUT_KILLED - ESLURM_JOB_NODE_FAIL_KILLED - ESPANK_ERROR - ESPANK_BAD_ARG - ESPANK_NOT_TASK - ESPANK_ENV_EXISTS - ESPANK_ENV_NOEXIST - ESPANK_NOSPACE - ESPANK_NOT_REMOTE - ESPANK_NOEXIST - ESPANK_NOT_EXECD - ESPANK_NOT_AVAIL - ESPANK_NOT_LOCAL - ESLURMD_KILL_TASK_FAILED - ESLURMD_KILL_JOB_ALREADY_COMPLETE - ESLURMD_INVALID_ACCT_FREQ - ESLURMD_INVALID_JOB_CREDENTIAL - ESLURMD_CREDENTIAL_EXPIRED - ESLURMD_CREDENTIAL_REVOKED - ESLURMD_CREDENTIAL_REPLAYED - ESLURMD_CREATE_BATCH_DIR_ERROR - ESLURMD_SETUP_ENVIRONMENT_ERROR - ESLURMD_SET_UID_OR_GID_ERROR - ESLURMD_EXECVE_FAILED - ESLURMD_IO_ERROR - ESLURMD_PROLOG_FAILED - ESLURMD_EPILOG_FAILED - ESLURMD_TOOMANYSTEPS - ESLURMD_STEP_EXISTS - ESLURMD_JOB_NOTRUNNING - ESLURMD_STEP_SUSPENDED - ESLURMD_STEP_NOTSUSPENDED - ESLURMD_INVALID_SOCKET_NAME_LEN - ESLURMD_CONTAINER_RUNTIME_INVALID - ESLURMD_CPU_BIND_ERROR - ESLURMD_CPU_LAYOUT_ERROR - ESLURM_PROTOCOL_INCOMPLETE_PACKET - SLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT - SLURM_PROTOCOL_SOCKET_ZERO_BYTES_SENT - ESLURM_AUTH_CRED_INVALID - ESLURM_AUTH_BADARG - ESLURM_AUTH_UNPACK - ESLURM_AUTH_SKIP - ESLURM_AUTH_UNABLE_TO_GENERATE_TOKEN - ESLURM_DB_CONNECTION - ESLURM_JOBS_RUNNING_ON_ASSOC - ESLURM_CLUSTER_DELETED - ESLURM_ONE_CHANGE - ESLURM_BAD_NAME - ESLURM_OVER_ALLOCATE - ESLURM_RESULT_TOO_LARGE - ESLURM_DB_QUERY_TOO_WIDE - ESLURM_DB_CONNECTION_INVALID - ESLURM_NO_REMOVE_DEFAULT_ACCOUNT - ESLURM_FED_CLUSTER_MAX_CNT - ESLURM_FED_CLUSTER_MULTIPLE_ASSIGNMENT - ESLURM_INVALID_CLUSTER_FEATURE - ESLURM_JOB_NOT_FEDERATED - ESLURM_INVALID_CLUSTER_NAME - ESLURM_FED_JOB_LOCK - 
ESLURM_FED_NO_VALID_CLUSTERS - ESLURM_MISSING_TIME_LIMIT - ESLURM_INVALID_KNL - ESLURM_PLUGIN_INVALID - ESLURM_PLUGIN_INCOMPLETE - ESLURM_PLUGIN_NOT_LOADED - ESLURM_REST_INVALID_QUERY - ESLURM_REST_FAIL_PARSING - ESLURM_REST_INVALID_JOBS_DESC - ESLURM_REST_EMPTY_RESULT - ESLURM_REST_MISSING_UID - ESLURM_REST_MISSING_GID - ESLURM_DATA_PATH_NOT_FOUND - ESLURM_DATA_PTR_NULL - ESLURM_DATA_CONV_FAILED - ESLURM_DATA_REGEX_COMPILE - ESLURM_DATA_UNKNOWN_MIME_TYPE - ESLURM_DATA_TOO_LARGE - ESLURM_DATA_FLAGS_INVALID_TYPE - ESLURM_DATA_FLAGS_INVALID - ESLURM_DATA_EXPECTED_LIST - ESLURM_DATA_EXPECTED_DICT - ESLURM_DATA_AMBIGUOUS_MODIFY - ESLURM_DATA_AMBIGUOUS_QUERY - ESLURM_DATA_PARSE_NOTHING - ESLURM_CONTAINER_NOT_CONFIGURED - - ctypedef struct slurm_errtab_t: - int xe_number - char* xe_name - char* xe_message - - slurm_errtab_t slurm_errtab[] - - unsigned int slurm_errtab_size - - char* slurm_strerror(int errnum) - - void slurm_seterrno(int errnum) - - int slurm_get_errno() - - void slurm_perror(const char* msg) +############################################################################## +# NOTICE: This File has been generated by scripts/pyslurm_bindgen.py, which +# uses the autopxd2 tool in order to generate Cython compatible definitions +# from the slurm.h C-Header file. Basically, this can be seen as a modified +# version of the original header, with the following changes: +# +# * have the correct cython syntax for type definitions, e.g. 
"typedef struct +# " is converted to "ctypedef struct " +# * C-Macros are listed with their appropriate uint type +# * Any definitions that cannot be translated are not included in this file +# +# Generated on 2023-04-30T11:54:32.116465 +# +# The Original Copyright notice from slurm.h has been included +# below: +# +############################################################################## +# slurm.h - Definitions for all of the Slurm RPCs +############################################################################# +# Copyright (C) 2002-2007 The Regents of the University of California. +# Copyright (C) 2008-2010 Lawrence Livermore National Security. +# Portions Copyright (C) 2010-2017 SchedMD LLC . +# Portions Copyright (C) 2012-2013 Los Alamos National Security, LLC. +# Portions Copyright 2013 Hewlett Packard Enterprise Development LP +# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). +# Written by Morris Jette , et. al. +# CODE-OCEC-09-009. All rights reserved. +# +# Please also check the DISCLAIMER file in the Slurm repository here: +# https://github.com/SchedMD/slurm/blob/master/DISCLAIMER +############################################################################## +# +# Copyright (C) 2023 The PySlurm Authors (Modifications as described above) +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. cdef extern from "slurm/slurm.h": - enum: - SLURM_VERSION_NUMBER - uint8_t SYSTEM_DIMENSIONS uint8_t HIGHEST_DIMENSIONS uint8_t HOST_NAME_MAX @@ -3103,1151 +2861,3 @@ cdef extern from "slurm/slurm.h": crontab_update_response_msg_t* slurm_update_crontab(uid_t uid, gid_t gid, char* crontab, list_t* jobs) int slurm_remove_crontab(uid_t uid, gid_t gid) - -cdef extern from "slurm/slurmdb.h": - - uint32_t QOS_FLAG_BASE - uint32_t QOS_FLAG_NOTSET - uint32_t QOS_FLAG_ADD - uint32_t QOS_FLAG_REMOVE - uint8_t QOS_FLAG_PART_MIN_NODE - uint8_t QOS_FLAG_PART_MAX_NODE - uint8_t QOS_FLAG_PART_TIME_LIMIT - uint8_t QOS_FLAG_ENFORCE_USAGE_THRES - uint8_t QOS_FLAG_NO_RESERVE - uint8_t QOS_FLAG_REQ_RESV - uint8_t QOS_FLAG_DENY_LIMIT - uint8_t QOS_FLAG_OVER_PART_QOS - uint16_t QOS_FLAG_NO_DECAY - uint16_t QOS_FLAG_USAGE_FACTOR_SAFE - uint32_t SLURMDB_RES_FLAG_BASE - uint32_t SLURMDB_RES_FLAG_NOTSET - uint32_t SLURMDB_RES_FLAG_ADD - uint32_t SLURMDB_RES_FLAG_REMOVE - uint8_t SLURMDB_RES_FLAG_ABSOLUTE - uint32_t FEDERATION_FLAG_BASE - uint32_t FEDERATION_FLAG_NOTSET - uint32_t FEDERATION_FLAG_ADD - uint32_t FEDERATION_FLAG_REMOVE - uint8_t CLUSTER_FED_STATE_BASE - uint16_t CLUSTER_FED_STATE_FLAGS - uint8_t CLUSTER_FED_STATE_DRAIN - uint8_t CLUSTER_FED_STATE_REMOVE - uint8_t SLURMDB_JOB_FLAG_NONE - uint8_t SLURMDB_JOB_CLEAR_SCHED - uint8_t SLURMDB_JOB_FLAG_NOTSET - uint8_t SLURMDB_JOB_FLAG_SUBMIT - uint8_t SLURMDB_JOB_FLAG_SCHED - uint8_t SLURMDB_JOB_FLAG_BACKFILL - uint8_t SLURMDB_JOB_FLAG_START_R - uint8_t JOBCOND_FLAG_DUP - uint8_t JOBCOND_FLAG_NO_STEP - uint8_t JOBCOND_FLAG_NO_TRUNC - uint8_t JOBCOND_FLAG_RUNAWAY - uint8_t JOBCOND_FLAG_WHOLE_HETJOB - uint8_t JOBCOND_FLAG_NO_WHOLE_HETJOB - uint8_t JOBCOND_FLAG_NO_WAIT - uint8_t JOBCOND_FLAG_NO_DEFAULT_USAGE - uint16_t 
JOBCOND_FLAG_SCRIPT - uint16_t JOBCOND_FLAG_ENV - uint16_t SLURMDB_PURGE_BASE - uint32_t SLURMDB_PURGE_FLAGS - uint32_t SLURMDB_PURGE_HOURS - uint32_t SLURMDB_PURGE_DAYS - uint32_t SLURMDB_PURGE_MONTHS - uint32_t SLURMDB_PURGE_ARCHIVE - uint32_t SLURMDB_FS_USE_PARENT - uint16_t SLURMDB_CLASSIFIED_FLAG - uint8_t SLURMDB_CLASS_BASE - uint8_t CLUSTER_FLAG_REGISTER - uint8_t CLUSTER_FLAG_A2 - uint8_t CLUSTER_FLAG_A3 - uint8_t CLUSTER_FLAG_A4 - uint8_t CLUSTER_FLAG_A5 - uint8_t CLUSTER_FLAG_A6 - uint8_t CLUSTER_FLAG_A7 - uint8_t CLUSTER_FLAG_MULTSD - uint16_t CLUSTER_FLAG_A9 - uint16_t CLUSTER_FLAG_FE - uint16_t CLUSTER_FLAG_CRAY - uint16_t CLUSTER_FLAG_FED - uint16_t CLUSTER_FLAG_EXT - uint8_t ASSOC_FLAG_DELETED - uint8_t ASSOC_FLAG_NO_UPDATE - uint8_t SLURMDB_EVENT_COND_OPEN - uint8_t DB_CONN_FLAG_CLUSTER_DEL - uint8_t DB_CONN_FLAG_ROLLBACK - -cdef extern from "slurm/slurmdb.h": - - ctypedef enum slurmdb_admin_level_t: - SLURMDB_ADMIN_NOTSET - SLURMDB_ADMIN_NONE - SLURMDB_ADMIN_OPERATOR - SLURMDB_ADMIN_SUPER_USER - - ctypedef enum slurmdb_classification_type_t: - SLURMDB_CLASS_NONE - SLURMDB_CLASS_CAPABILITY - SLURMDB_CLASS_CAPACITY - SLURMDB_CLASS_CAPAPACITY - - ctypedef enum slurmdb_event_type_t: - SLURMDB_EVENT_ALL - SLURMDB_EVENT_CLUSTER - SLURMDB_EVENT_NODE - - ctypedef enum slurmdb_problem_type_t: - SLURMDB_PROBLEM_NOT_SET - SLURMDB_PROBLEM_ACCT_NO_ASSOC - SLURMDB_PROBLEM_ACCT_NO_USERS - SLURMDB_PROBLEM_USER_NO_ASSOC - SLURMDB_PROBLEM_USER_NO_UID - - ctypedef enum slurmdb_report_sort_t: - SLURMDB_REPORT_SORT_TIME - SLURMDB_REPORT_SORT_NAME - - ctypedef enum slurmdb_report_time_format_t: - SLURMDB_REPORT_TIME_SECS - SLURMDB_REPORT_TIME_MINS - SLURMDB_REPORT_TIME_HOURS - SLURMDB_REPORT_TIME_PERCENT - SLURMDB_REPORT_TIME_SECS_PER - SLURMDB_REPORT_TIME_MINS_PER - SLURMDB_REPORT_TIME_HOURS_PER - - ctypedef enum slurmdb_resource_type_t: - SLURMDB_RESOURCE_NOTSET - SLURMDB_RESOURCE_LICENSE - - ctypedef enum slurmdb_update_type_t: - SLURMDB_UPDATE_NOTSET - 
SLURMDB_ADD_USER - SLURMDB_ADD_ASSOC - SLURMDB_ADD_COORD - SLURMDB_MODIFY_USER - SLURMDB_MODIFY_ASSOC - SLURMDB_REMOVE_USER - SLURMDB_REMOVE_ASSOC - SLURMDB_REMOVE_COORD - SLURMDB_ADD_QOS - SLURMDB_REMOVE_QOS - SLURMDB_MODIFY_QOS - SLURMDB_ADD_WCKEY - SLURMDB_REMOVE_WCKEY - SLURMDB_MODIFY_WCKEY - SLURMDB_ADD_CLUSTER - SLURMDB_REMOVE_CLUSTER - SLURMDB_REMOVE_ASSOC_USAGE - SLURMDB_ADD_RES - SLURMDB_REMOVE_RES - SLURMDB_MODIFY_RES - SLURMDB_REMOVE_QOS_USAGE - SLURMDB_ADD_TRES - SLURMDB_UPDATE_FEDS - - cdef enum cluster_fed_states: - CLUSTER_FED_STATE_NA - CLUSTER_FED_STATE_ACTIVE - CLUSTER_FED_STATE_INACTIVE - - ctypedef struct slurmdb_tres_rec_t: - uint64_t alloc_secs - uint32_t rec_count - uint64_t count - uint32_t id - char* name - char* type - - ctypedef struct slurmdb_assoc_cond_t: - List acct_list - List cluster_list - List def_qos_id_list - List format_list - List id_list - uint16_t only_defs - List parent_acct_list - List partition_list - List qos_list - time_t usage_end - time_t usage_start - List user_list - uint16_t with_usage - uint16_t with_deleted - uint16_t with_raw_qos - uint16_t with_sub_accts - uint16_t without_parent_info - uint16_t without_parent_limits - - ctypedef struct slurmdb_job_cond_t: - List acct_list - List associd_list - List cluster_list - List constraint_list - uint32_t cpus_max - uint32_t cpus_min - uint32_t db_flags - int32_t exitcode - uint32_t flags - List format_list - List groupid_list - List jobname_list - uint32_t nodes_max - uint32_t nodes_min - List partition_list - List qos_list - List reason_list - List resv_list - List resvid_list - List state_list - List step_list - uint32_t timelimit_max - uint32_t timelimit_min - time_t usage_end - time_t usage_start - char* used_nodes - List userid_list - List wckey_list - - ctypedef struct slurmdb_stats_t: - double act_cpufreq - uint64_t consumed_energy - char* tres_usage_in_ave - char* tres_usage_in_max - char* tres_usage_in_max_nodeid - char* tres_usage_in_max_taskid - char* 
tres_usage_in_min - char* tres_usage_in_min_nodeid - char* tres_usage_in_min_taskid - char* tres_usage_in_tot - char* tres_usage_out_ave - char* tres_usage_out_max - char* tres_usage_out_max_nodeid - char* tres_usage_out_max_taskid - char* tres_usage_out_min - char* tres_usage_out_min_nodeid - char* tres_usage_out_min_taskid - char* tres_usage_out_tot - - ctypedef struct slurmdb_account_cond_t: - slurmdb_assoc_cond_t* assoc_cond - List description_list - List organization_list - uint16_t with_assocs - uint16_t with_coords - uint16_t with_deleted - - cdef enum: - SLURMDB_ACCT_FLAG_NONE - SLURMDB_ACCT_FLAG_DELETED - - ctypedef struct slurmdb_account_rec_t: - List assoc_list - List coordinators - char* description - uint32_t flags - char* name - char* organization - - ctypedef struct slurmdb_accounting_rec_t: - uint64_t alloc_secs - uint32_t id - time_t period_start - slurmdb_tres_rec_t tres_rec - - ctypedef struct slurmdb_archive_cond_t: - char* archive_dir - char* archive_script - slurmdb_job_cond_t* job_cond - uint32_t purge_event - uint32_t purge_job - uint32_t purge_resv - uint32_t purge_step - uint32_t purge_suspend - uint32_t purge_txn - uint32_t purge_usage - - ctypedef struct slurmdb_archive_rec_t: - char* archive_file - char* insert - - ctypedef struct slurmdb_tres_cond_t: - uint64_t count - List format_list - List id_list - List name_list - List type_list - uint16_t with_deleted - - ctypedef slurmdb_assoc_usage slurmdb_assoc_usage_t - - ctypedef slurmdb_bf_usage slurmdb_bf_usage_t - - ctypedef slurmdb_user_rec slurmdb_user_rec_t - - cdef struct slurmdb_assoc_rec: - List accounting_list - char* acct - slurmdb_assoc_rec* assoc_next - slurmdb_assoc_rec* assoc_next_id - slurmdb_bf_usage_t* bf_usage - char* cluster - char* comment - uint32_t def_qos_id - uint16_t flags - uint32_t grp_jobs - uint32_t grp_jobs_accrue - uint32_t grp_submit_jobs - char* grp_tres - uint64_t* grp_tres_ctld - char* grp_tres_mins - uint64_t* grp_tres_mins_ctld - char* grp_tres_run_mins 
- uint64_t* grp_tres_run_mins_ctld - uint32_t grp_wall - uint32_t id - uint16_t is_def - slurmdb_assoc_usage_t* leaf_usage - uint32_t lft - uint32_t max_jobs - uint32_t max_jobs_accrue - uint32_t max_submit_jobs - char* max_tres_mins_pj - uint64_t* max_tres_mins_ctld - char* max_tres_run_mins - uint64_t* max_tres_run_mins_ctld - char* max_tres_pj - uint64_t* max_tres_ctld - char* max_tres_pn - uint64_t* max_tres_pn_ctld - uint32_t max_wall_pj - uint32_t min_prio_thresh - char* parent_acct - uint32_t parent_id - char* partition - uint32_t priority - List qos_list - uint32_t rgt - uint32_t shares_raw - uint32_t uid - slurmdb_assoc_usage_t* usage - char* user - slurmdb_user_rec_t* user_rec - - ctypedef slurmdb_assoc_rec slurmdb_assoc_rec_t - - cdef struct slurmdb_assoc_usage: - uint32_t accrue_cnt - List children_list - bitstr_t* grp_node_bitmap - uint16_t* grp_node_job_cnt - uint64_t* grp_used_tres - uint64_t* grp_used_tres_run_secs - double grp_used_wall - double fs_factor - uint32_t level_shares - slurmdb_assoc_rec_t* parent_assoc_ptr - double priority_norm - slurmdb_assoc_rec_t* fs_assoc_ptr - double shares_norm - uint32_t tres_cnt - long double usage_efctv - long double usage_norm - long double usage_raw - long double* usage_tres_raw - uint32_t used_jobs - uint32_t used_submit_jobs - long double level_fs - bitstr_t* valid_qos - - cdef struct slurmdb_bf_usage: - uint64_t count - time_t last_sched - - ctypedef struct slurmdb_cluster_cond_t: - uint16_t classification - List cluster_list - List federation_list - uint32_t flags - List format_list - List plugin_id_select_list - List rpc_version_list - time_t usage_end - time_t usage_start - uint16_t with_deleted - uint16_t with_usage - - ctypedef struct slurmdb_cluster_fed_t: - List feature_list - uint32_t id - char* name - void* recv - void* send - uint32_t state - bool sync_recvd - bool sync_sent - - cdef struct slurmdb_cluster_rec: - List accounting_list - uint16_t classification - time_t comm_fail_time - 
slurm_addr_t control_addr - char* control_host - uint32_t control_port - uint16_t dimensions - int* dim_size - slurmdb_cluster_fed_t fed - uint32_t flags - pthread_mutex_t lock - char* name - char* nodes - uint32_t plugin_id_select - slurmdb_assoc_rec_t* root_assoc - uint16_t rpc_version - List send_rpc - char* tres_str - - ctypedef struct slurmdb_cluster_accounting_rec_t: - uint64_t alloc_secs - uint64_t down_secs - uint64_t idle_secs - uint64_t over_secs - uint64_t pdown_secs - time_t period_start - uint64_t plan_secs - slurmdb_tres_rec_t tres_rec - - ctypedef struct slurmdb_clus_res_rec_t: - char* cluster - uint32_t allowed - - ctypedef struct slurmdb_coord_rec_t: - char* name - uint16_t direct - - ctypedef struct slurmdb_event_cond_t: - List cluster_list - uint32_t cond_flags - uint32_t cpus_max - uint32_t cpus_min - uint16_t event_type - List format_list - char* node_list - time_t period_end - time_t period_start - List reason_list - List reason_uid_list - List state_list - - ctypedef struct slurmdb_event_rec_t: - char* cluster - char* cluster_nodes - uint16_t event_type - char* node_name - time_t period_end - time_t period_start - char* reason - uint32_t reason_uid - uint32_t state - char* tres_str - - ctypedef struct slurmdb_federation_cond_t: - List cluster_list - List federation_list - List format_list - uint16_t with_deleted - - ctypedef struct slurmdb_federation_rec_t: - char* name - uint32_t flags - List cluster_list - - ctypedef struct slurmdb_job_rec_t: - char* account - char* admin_comment - uint32_t alloc_nodes - uint32_t array_job_id - uint32_t array_max_tasks - uint32_t array_task_id - char* array_task_str - uint32_t associd - char* blockid - char* cluster - char* constraints - char* container - uint64_t db_index - uint32_t derived_ec - char* derived_es - uint32_t elapsed - time_t eligible - time_t end - char* env - uint32_t exitcode - char* extra - char* failed_node - uint32_t flags - void* first_step_ptr - uint32_t gid - uint32_t het_job_id - 
uint32_t het_job_offset - uint32_t jobid - char* jobname - uint32_t lft - char* licenses - char* mcs_label - char* nodes - char* partition - uint32_t priority - uint32_t qosid - uint32_t req_cpus - uint64_t req_mem - uint32_t requid - uint32_t resvid - char* resv_name - char* script - uint32_t show_full - time_t start - uint32_t state - uint32_t state_reason_prev - List steps - time_t submit - char* submit_line - uint32_t suspended - char* system_comment - uint64_t sys_cpu_sec - uint64_t sys_cpu_usec - uint32_t timelimit - uint64_t tot_cpu_sec - uint64_t tot_cpu_usec - char* tres_alloc_str - char* tres_req_str - uint32_t uid - char* used_gres - char* user - uint64_t user_cpu_sec - uint64_t user_cpu_usec - char* wckey - uint32_t wckeyid - char* work_dir - - ctypedef struct slurmdb_qos_usage_t: - uint32_t accrue_cnt - List acct_limit_list - List job_list - bitstr_t* grp_node_bitmap - uint16_t* grp_node_job_cnt - uint32_t grp_used_jobs - uint32_t grp_used_submit_jobs - uint64_t* grp_used_tres - uint64_t* grp_used_tres_run_secs - double grp_used_wall - double norm_priority - uint32_t tres_cnt - long double usage_raw - long double* usage_tres_raw - List user_limit_list - - ctypedef struct slurmdb_qos_rec_t: - char* description - uint32_t id - uint32_t flags - uint32_t grace_time - uint32_t grp_jobs_accrue - uint32_t grp_jobs - uint32_t grp_submit_jobs - char* grp_tres - uint64_t* grp_tres_ctld - char* grp_tres_mins - uint64_t* grp_tres_mins_ctld - char* grp_tres_run_mins - uint64_t* grp_tres_run_mins_ctld - uint32_t grp_wall - double limit_factor - uint32_t max_jobs_pa - uint32_t max_jobs_pu - uint32_t max_jobs_accrue_pa - uint32_t max_jobs_accrue_pu - uint32_t max_submit_jobs_pa - uint32_t max_submit_jobs_pu - char* max_tres_mins_pj - uint64_t* max_tres_mins_pj_ctld - char* max_tres_pa - uint64_t* max_tres_pa_ctld - char* max_tres_pj - uint64_t* max_tres_pj_ctld - char* max_tres_pn - uint64_t* max_tres_pn_ctld - char* max_tres_pu - uint64_t* max_tres_pu_ctld - char* 
max_tres_run_mins_pa - uint64_t* max_tres_run_mins_pa_ctld - char* max_tres_run_mins_pu - uint64_t* max_tres_run_mins_pu_ctld - uint32_t max_wall_pj - uint32_t min_prio_thresh - char* min_tres_pj - uint64_t* min_tres_pj_ctld - char* name - bitstr_t* preempt_bitstr - List preempt_list - uint16_t preempt_mode - uint32_t preempt_exempt_time - uint32_t priority - slurmdb_qos_usage_t* usage - double usage_factor - double usage_thres - time_t blocked_until - - ctypedef struct slurmdb_qos_cond_t: - List description_list - List id_list - List format_list - List name_list - uint16_t preempt_mode - uint16_t with_deleted - - ctypedef struct slurmdb_reservation_cond_t: - List cluster_list - uint64_t flags - List format_list - List id_list - List name_list - char* nodes - time_t time_end - time_t time_start - uint16_t with_usage - - ctypedef struct slurmdb_reservation_rec_t: - char* assocs - char* cluster - char* comment - uint64_t flags - uint32_t id - char* name - char* nodes - char* node_inx - time_t time_end - time_t time_start - time_t time_start_prev - char* tres_str - double unused_wall - List tres_list - - ctypedef struct slurmdb_step_rec_t: - char* container - uint32_t elapsed - time_t end - int32_t exitcode - slurmdb_job_rec_t* job_ptr - uint32_t nnodes - char* nodes - uint32_t ntasks - char* pid_str - uint32_t req_cpufreq_min - uint32_t req_cpufreq_max - uint32_t req_cpufreq_gov - uint32_t requid - time_t start - uint32_t state - slurmdb_stats_t stats - slurm_step_id_t step_id - char* stepname - char* submit_line - uint32_t suspended - uint64_t sys_cpu_sec - uint32_t sys_cpu_usec - uint32_t task_dist - uint64_t tot_cpu_sec - uint32_t tot_cpu_usec - char* tres_alloc_str - uint64_t user_cpu_sec - uint32_t user_cpu_usec - - ctypedef struct slurmdb_res_cond_t: - list_t* allowed_list - List cluster_list - List description_list - uint32_t flags - List format_list - List id_list - List manager_list - List name_list - List server_list - List type_list - uint16_t with_deleted 
- uint16_t with_clusters - - ctypedef struct slurmdb_res_rec_t: - uint32_t allocated - uint32_t last_consumed - List clus_res_list - slurmdb_clus_res_rec_t* clus_res_rec - uint32_t count - char* description - uint32_t flags - uint32_t id - time_t last_update - char* manager - char* name - char* server - uint32_t type - - ctypedef struct slurmdb_txn_cond_t: - List acct_list - List action_list - List actor_list - List cluster_list - List format_list - List id_list - List info_list - List name_list - time_t time_end - time_t time_start - List user_list - uint16_t with_assoc_info - - ctypedef struct slurmdb_txn_rec_t: - char* accts - uint16_t action - char* actor_name - char* clusters - uint32_t id - char* set_info - time_t timestamp - char* users - char* where_query - - ctypedef struct slurmdb_used_limits_t: - uint32_t accrue_cnt - char* acct - uint32_t jobs - uint32_t submit_jobs - uint64_t* tres - uint64_t* tres_run_mins - bitstr_t* node_bitmap - uint16_t* node_job_cnt - uint32_t uid - - ctypedef struct slurmdb_user_cond_t: - uint16_t admin_level - slurmdb_assoc_cond_t* assoc_cond - List def_acct_list - List def_wckey_list - uint16_t with_assocs - uint16_t with_coords - uint16_t with_deleted - uint16_t with_wckeys - uint16_t without_defaults - - cdef enum: - SLURMDB_USER_FLAG_NONE - SLURMDB_USER_FLAG_DELETED - - cdef struct slurmdb_user_rec: - uint16_t admin_level - List assoc_list - slurmdb_bf_usage_t* bf_usage - List coord_accts - char* default_acct - char* default_wckey - uint32_t flags - char* name - char* old_name - uint32_t uid - List wckey_list - - ctypedef struct slurmdb_update_object_t: - List objects - uint16_t type - - ctypedef struct slurmdb_wckey_cond_t: - List cluster_list - List format_list - List id_list - List name_list - uint16_t only_defs - time_t usage_end - time_t usage_start - List user_list - uint16_t with_usage - uint16_t with_deleted - - cdef enum: - SLURMDB_WCKEY_FLAG_NONE - SLURMDB_WCKEY_FLAG_DELETED - - ctypedef struct 
slurmdb_wckey_rec_t: - List accounting_list - char* cluster - uint32_t flags - uint32_t id - uint16_t is_def - char* name - uint32_t uid - char* user - - ctypedef struct slurmdb_print_tree_t: - char* name - char* print_name - char* spaces - uint16_t user - - ctypedef struct slurmdb_hierarchical_rec_t: - slurmdb_assoc_rec_t* assoc - char* key - char* sort_name - List children - - ctypedef struct slurmdb_report_assoc_rec_t: - char* acct - char* cluster - char* parent_acct - List tres_list - char* user - - ctypedef struct slurmdb_report_user_rec_t: - char* acct - List acct_list - List assoc_list - char* name - List tres_list - uid_t uid - - ctypedef struct slurmdb_report_cluster_rec_t: - List accounting_list - List assoc_list - char* name - List tres_list - List user_list - - ctypedef struct slurmdb_report_job_grouping_t: - uint32_t count - List jobs - uint32_t min_size - uint32_t max_size - List tres_list - - ctypedef struct slurmdb_report_acct_grouping_t: - char* acct - uint32_t count - List groups - uint32_t lft - uint32_t rgt - List tres_list - - ctypedef struct slurmdb_report_cluster_grouping_t: - List acct_list - char* cluster - uint32_t count - List tres_list - - cdef enum: - DBD_ROLLUP_HOUR - DBD_ROLLUP_DAY - DBD_ROLLUP_MONTH - DBD_ROLLUP_COUNT - - ctypedef struct slurmdb_rollup_stats_t: - char* cluster_name - uint16_t count[4] - time_t timestamp[4] - uint64_t time_last[4] - uint64_t time_max[4] - uint64_t time_total[4] - - ctypedef struct slurmdb_rpc_obj_t: - uint32_t cnt - uint32_t id - uint64_t time - uint64_t time_ave - - ctypedef struct slurmdb_stats_rec_t: - slurmdb_rollup_stats_t* dbd_rollup_stats - List rollup_stats - List rpc_list - time_t time_start - List user_list - - slurmdb_cluster_rec_t* working_cluster_rec - - int slurmdb_accounts_add(void* db_conn, List acct_list) - - List slurmdb_accounts_get(void* db_conn, slurmdb_account_cond_t* acct_cond) - - List slurmdb_accounts_modify(void* db_conn, slurmdb_account_cond_t* acct_cond, 
slurmdb_account_rec_t* acct) - - List slurmdb_accounts_remove(void* db_conn, slurmdb_account_cond_t* acct_cond) - - int slurmdb_archive(void* db_conn, slurmdb_archive_cond_t* arch_cond) - - int slurmdb_archive_load(void* db_conn, slurmdb_archive_rec_t* arch_rec) - - int slurmdb_associations_add(void* db_conn, List assoc_list) - - List slurmdb_associations_get(void* db_conn, slurmdb_assoc_cond_t* assoc_cond) - - List slurmdb_associations_modify(void* db_conn, slurmdb_assoc_cond_t* assoc_cond, slurmdb_assoc_rec_t* assoc) - - List slurmdb_associations_remove(void* db_conn, slurmdb_assoc_cond_t* assoc_cond) - - int slurmdb_clusters_add(void* db_conn, List cluster_list) - - List slurmdb_clusters_get(void* db_conn, slurmdb_cluster_cond_t* cluster_cond) - - List slurmdb_clusters_modify(void* db_conn, slurmdb_cluster_cond_t* cluster_cond, slurmdb_cluster_rec_t* cluster) - - List slurmdb_clusters_remove(void* db_conn, slurmdb_cluster_cond_t* cluster_cond) - - List slurmdb_report_cluster_account_by_user(void* db_conn, slurmdb_assoc_cond_t* assoc_cond) - - List slurmdb_report_cluster_user_by_account(void* db_conn, slurmdb_assoc_cond_t* assoc_cond) - - List slurmdb_report_cluster_wckey_by_user(void* db_conn, slurmdb_wckey_cond_t* wckey_cond) - - List slurmdb_report_cluster_user_by_wckey(void* db_conn, slurmdb_wckey_cond_t* wckey_cond) - - List slurmdb_report_job_sizes_grouped_by_account(void* db_conn, slurmdb_job_cond_t* job_cond, List grouping_list, bool flat_view, bool acct_as_parent) - - List slurmdb_report_job_sizes_grouped_by_wckey(void* db_conn, slurmdb_job_cond_t* job_cond, List grouping_list) - - List slurmdb_report_job_sizes_grouped_by_account_then_wckey(void* db_conn, slurmdb_job_cond_t* job_cond, List grouping_list, bool flat_view, bool acct_as_parent) - - List slurmdb_report_user_top_usage(void* db_conn, slurmdb_user_cond_t* user_cond, bool group_accounts) - - void* slurmdb_connection_get(uint16_t* persist_conn_flags) - - int slurmdb_connection_close(void** 
db_conn) - - int slurmdb_connection_commit(void* db_conn, bool commit) - - int slurmdb_coord_add(void* db_conn, List acct_list, slurmdb_user_cond_t* user_cond) - - List slurmdb_coord_remove(void* db_conn, List acct_list, slurmdb_user_cond_t* user_cond) - - int slurmdb_federations_add(void* db_conn, List federation_list) - - List slurmdb_federations_modify(void* db_conn, slurmdb_federation_cond_t* fed_cond, slurmdb_federation_rec_t* fed) - - List slurmdb_federations_remove(void* db_conn, slurmdb_federation_cond_t* fed_cond) - - List slurmdb_federations_get(void* db_conn, slurmdb_federation_cond_t* fed_cond) - - List slurmdb_job_modify(void* db_conn, slurmdb_job_cond_t* job_cond, slurmdb_job_rec_t* job) - - List slurmdb_jobs_get(void* db_conn, slurmdb_job_cond_t* job_cond) - - int slurmdb_jobs_fix_runaway(void* db_conn, List jobs) - - int slurmdb_jobcomp_init() - - int slurmdb_jobcomp_fini() - - List slurmdb_jobcomp_jobs_get(slurmdb_job_cond_t* job_cond) - - int slurmdb_reconfig(void* db_conn) - - int slurmdb_shutdown(void* db_conn) - - int slurmdb_clear_stats(void* db_conn) - - int slurmdb_get_stats(void* db_conn, slurmdb_stats_rec_t** stats_pptr) - - List slurmdb_config_get(void* db_conn) - - List slurmdb_events_get(void* db_conn, slurmdb_event_cond_t* event_cond) - - List slurmdb_problems_get(void* db_conn, slurmdb_assoc_cond_t* assoc_cond) - - List slurmdb_reservations_get(void* db_conn, slurmdb_reservation_cond_t* resv_cond) - - List slurmdb_txn_get(void* db_conn, slurmdb_txn_cond_t* txn_cond) - - List slurmdb_get_info_cluster(char* cluster_names) - - int slurmdb_get_first_avail_cluster(job_desc_msg_t* req, char* cluster_names, slurmdb_cluster_rec_t** cluster_rec) - - int slurmdb_get_first_het_job_cluster(List job_req_list, char* cluster_names, slurmdb_cluster_rec_t** cluster_rec) - - void slurmdb_destroy_assoc_usage(void* object) - - void slurmdb_destroy_bf_usage(void* object) - - void slurmdb_destroy_bf_usage_members(void* object) - - void 
slurmdb_destroy_qos_usage(void* object) - - void slurmdb_destroy_user_rec(void* object) - - void slurmdb_destroy_account_rec(void* object) - - void slurmdb_destroy_coord_rec(void* object) - - void slurmdb_destroy_clus_res_rec(void* object) - - void slurmdb_destroy_cluster_accounting_rec(void* object) - - void slurmdb_destroy_cluster_rec(void* object) - - void slurmdb_destroy_federation_rec(void* object) - - void slurmdb_destroy_accounting_rec(void* object) - - void slurmdb_free_assoc_mgr_state_msg(void* object) - - void slurmdb_free_assoc_rec_members(slurmdb_assoc_rec_t* assoc) - - void slurmdb_destroy_assoc_rec(void* object) - - void slurmdb_destroy_event_rec(void* object) - - void slurmdb_destroy_job_rec(void* object) - - void slurmdb_free_qos_rec_members(slurmdb_qos_rec_t* qos) - - void slurmdb_destroy_qos_rec(void* object) - - void slurmdb_destroy_reservation_rec(void* object) - - void slurmdb_destroy_step_rec(void* object) - - void slurmdb_destroy_res_rec(void* object) - - void slurmdb_destroy_txn_rec(void* object) - - void slurmdb_destroy_wckey_rec(void* object) - - void slurmdb_destroy_archive_rec(void* object) - - void slurmdb_destroy_tres_rec_noalloc(void* object) - - void slurmdb_destroy_tres_rec(void* object) - - void slurmdb_destroy_report_assoc_rec(void* object) - - void slurmdb_destroy_report_user_rec(void* object) - - void slurmdb_destroy_report_cluster_rec(void* object) - - void slurmdb_destroy_user_cond(void* object) - - void slurmdb_destroy_account_cond(void* object) - - void slurmdb_destroy_cluster_cond(void* object) - - void slurmdb_destroy_federation_cond(void* object) - - void slurmdb_destroy_tres_cond(void* object) - - void slurmdb_destroy_assoc_cond(void* object) - - void slurmdb_destroy_event_cond(void* object) - - void slurmdb_destroy_job_cond(void* object) - - void slurmdb_destroy_qos_cond(void* object) - - void slurmdb_destroy_reservation_cond(void* object) - - void slurmdb_destroy_res_cond(void* object) - - void 
slurmdb_destroy_txn_cond(void* object) - - void slurmdb_destroy_wckey_cond(void* object) - - void slurmdb_destroy_archive_cond(void* object) - - void slurmdb_destroy_update_object(void* object) - - void slurmdb_destroy_used_limits(void* object) - - void slurmdb_destroy_print_tree(void* object) - - void slurmdb_destroy_hierarchical_rec(void* object) - - void slurmdb_destroy_report_job_grouping(void* object) - - void slurmdb_destroy_report_acct_grouping(void* object) - - void slurmdb_destroy_report_cluster_grouping(void* object) - - void slurmdb_destroy_rpc_obj(void* object) - - void slurmdb_destroy_rollup_stats(void* object) - - void slurmdb_free_stats_rec_members(void* object) - - void slurmdb_destroy_stats_rec(void* object) - - void slurmdb_free_slurmdb_stats_members(slurmdb_stats_t* stats) - - void slurmdb_destroy_slurmdb_stats(slurmdb_stats_t* stats) - - void slurmdb_init_assoc_rec(slurmdb_assoc_rec_t* assoc, bool free_it) - - void slurmdb_init_clus_res_rec(slurmdb_clus_res_rec_t* clus_res, bool free_it) - - void slurmdb_init_cluster_rec(slurmdb_cluster_rec_t* cluster, bool free_it) - - void slurmdb_init_federation_rec(slurmdb_federation_rec_t* federation, bool free_it) - - void slurmdb_init_qos_rec(slurmdb_qos_rec_t* qos, bool free_it, uint32_t init_val) - - void slurmdb_init_res_rec(slurmdb_res_rec_t* res, bool free_it) - - void slurmdb_init_wckey_rec(slurmdb_wckey_rec_t* wckey, bool free_it) - - void slurmdb_init_tres_cond(slurmdb_tres_cond_t* tres, bool free_it) - - void slurmdb_init_cluster_cond(slurmdb_cluster_cond_t* cluster, bool free_it) - - void slurmdb_init_federation_cond(slurmdb_federation_cond_t* federation, bool free_it) - - void slurmdb_init_res_cond(slurmdb_res_cond_t* cluster, bool free_it) - - List slurmdb_get_hierarchical_sorted_assoc_list(List assoc_list, bool use_lft) - - List slurmdb_get_acct_hierarchical_rec_list(List assoc_list) - - char* slurmdb_tree_name_get(char* name, char* parent, List tree_list) - - int slurmdb_res_add(void* 
db_conn, List res_list) - - List slurmdb_res_get(void* db_conn, slurmdb_res_cond_t* res_cond) - - List slurmdb_res_modify(void* db_conn, slurmdb_res_cond_t* res_cond, slurmdb_res_rec_t* res) - - List slurmdb_res_remove(void* db_conn, slurmdb_res_cond_t* res_cond) - - int slurmdb_qos_add(void* db_conn, List qos_list) - - List slurmdb_qos_get(void* db_conn, slurmdb_qos_cond_t* qos_cond) - - List slurmdb_qos_modify(void* db_conn, slurmdb_qos_cond_t* qos_cond, slurmdb_qos_rec_t* qos) - - List slurmdb_qos_remove(void* db_conn, slurmdb_qos_cond_t* qos_cond) - - int slurmdb_tres_add(void* db_conn, List tres_list) - - List slurmdb_tres_get(void* db_conn, slurmdb_tres_cond_t* tres_cond) - - int slurmdb_usage_get(void* db_conn, void* in_, int type, time_t start, time_t end) - - int slurmdb_usage_roll(void* db_conn, time_t sent_start, time_t sent_end, uint16_t archive_data, List* rollup_stats_list_in) - - int slurmdb_users_add(void* db_conn, List user_list) - - List slurmdb_users_get(void* db_conn, slurmdb_user_cond_t* user_cond) - - List slurmdb_users_modify(void* db_conn, slurmdb_user_cond_t* user_cond, slurmdb_user_rec_t* user) - - List slurmdb_users_remove(void* db_conn, slurmdb_user_cond_t* user_cond) - - int slurmdb_wckeys_add(void* db_conn, List wckey_list) - - List slurmdb_wckeys_get(void* db_conn, slurmdb_wckey_cond_t* wckey_cond) - - List slurmdb_wckeys_modify(void* db_conn, slurmdb_wckey_cond_t* wckey_cond, slurmdb_wckey_rec_t* wckey) - - List slurmdb_wckeys_remove(void* db_conn, slurmdb_wckey_cond_t* wckey_cond) - diff --git a/pyslurm/slurm/slurm_errno.h.pxi b/pyslurm/slurm/slurm_errno.h.pxi new file mode 100644 index 00000000..c1c6517f --- /dev/null +++ b/pyslurm/slurm/slurm_errno.h.pxi @@ -0,0 +1,336 @@ +############################################################################## +# NOTICE: This File has been generated by scripts/pyslurm_bindgen.py, which +# uses the autopxd2 tool in order to generate Cython compatible definitions +# from the slurm_errno.h 
C-Header file. Basically, this can be seen as a modified +# version of the original header, with the following changes: +# +# * have the correct cython syntax for type definitions, e.g. "typedef struct +# " is converted to "ctypedef struct " +# * C-Macros are listed with their appropriate uint type +# * Any definitions that cannot be translated are not included in this file +# +# Generated on 2023-04-30T11:54:32.011184 +# +# The Original Copyright notice from slurm_errno.h has been included +# below: +# +############################################################################## +# slurm_errno.h - error codes and functions for slurm +############################################################################## +# Copyright (C) 2002-2007 The Regents of the University of California. +# Copyright (C) 2008-2009 Lawrence Livermore National Security. +# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). +# Written by Kevin Tew , +# Jim Garlick , et. al. +# CODE-OCEC-09-009. All rights reserved. +# +# Please also check the DISCLAIMER file in the Slurm repository here: +# https://github.com/SchedMD/slurm/blob/master/DISCLAIMER +############################################################################## +# +# Copyright (C) 2023 The PySlurm Authors (Modifications as described above) +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +cdef extern from "slurm/slurm_errno.h": + + uint8_t SLURM_SUCCESS + uint8_t ESPANK_SUCCESS + int8_t SLURM_ERROR + +cdef extern from "slurm/slurm_errno.h": + + ctypedef enum slurm_err_t: + SLURM_UNEXPECTED_MSG_ERROR + SLURM_COMMUNICATIONS_CONNECTION_ERROR + SLURM_COMMUNICATIONS_SEND_ERROR + SLURM_COMMUNICATIONS_RECEIVE_ERROR + SLURM_COMMUNICATIONS_SHUTDOWN_ERROR + SLURM_PROTOCOL_VERSION_ERROR + SLURM_PROTOCOL_IO_STREAM_VERSION_ERROR + SLURM_PROTOCOL_AUTHENTICATION_ERROR + SLURM_PROTOCOL_INSANE_MSG_LENGTH + SLURM_MPI_PLUGIN_NAME_INVALID + SLURM_MPI_PLUGIN_PRELAUNCH_SETUP_FAILED + SLURM_PLUGIN_NAME_INVALID + SLURM_UNKNOWN_FORWARD_ADDR + SLURM_COMMUNICATIONS_MISSING_SOCKET_ERROR + SLURMCTLD_COMMUNICATIONS_CONNECTION_ERROR + SLURMCTLD_COMMUNICATIONS_SEND_ERROR + SLURMCTLD_COMMUNICATIONS_RECEIVE_ERROR + SLURMCTLD_COMMUNICATIONS_SHUTDOWN_ERROR + SLURMCTLD_COMMUNICATIONS_BACKOFF + SLURM_NO_CHANGE_IN_DATA + ESLURM_INVALID_PARTITION_NAME + ESLURM_DEFAULT_PARTITION_NOT_SET + ESLURM_ACCESS_DENIED + ESLURM_JOB_MISSING_REQUIRED_PARTITION_GROUP + ESLURM_REQUESTED_NODES_NOT_IN_PARTITION + ESLURM_TOO_MANY_REQUESTED_CPUS + ESLURM_INVALID_NODE_COUNT + ESLURM_ERROR_ON_DESC_TO_RECORD_COPY + ESLURM_JOB_MISSING_SIZE_SPECIFICATION + ESLURM_JOB_SCRIPT_MISSING + ESLURM_USER_ID_MISSING + ESLURM_DUPLICATE_JOB_ID + ESLURM_PATHNAME_TOO_LONG + ESLURM_NOT_TOP_PRIORITY + ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE + ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE + ESLURM_NODES_BUSY + ESLURM_INVALID_JOB_ID + ESLURM_INVALID_NODE_NAME + ESLURM_WRITING_TO_FILE + ESLURM_TRANSITION_STATE_NO_UPDATE + ESLURM_ALREADY_DONE + ESLURM_INTERCONNECT_FAILURE + ESLURM_BAD_DIST + ESLURM_JOB_PENDING + ESLURM_BAD_TASK_COUNT + ESLURM_INVALID_JOB_CREDENTIAL + ESLURM_IN_STANDBY_MODE + ESLURM_INVALID_NODE_STATE + 
ESLURM_INVALID_FEATURE + ESLURM_INVALID_AUTHTYPE_CHANGE + ESLURM_ACTIVE_FEATURE_NOT_SUBSET + ESLURM_INVALID_SCHEDTYPE_CHANGE + ESLURM_INVALID_SELECTTYPE_CHANGE + ESLURM_INVALID_SWITCHTYPE_CHANGE + ESLURM_FRAGMENTATION + ESLURM_NOT_SUPPORTED + ESLURM_DISABLED + ESLURM_DEPENDENCY + ESLURM_BATCH_ONLY + ESLURM_LICENSES_UNAVAILABLE + ESLURM_JOB_HELD + ESLURM_INVALID_CRED_TYPE_CHANGE + ESLURM_INVALID_TASK_MEMORY + ESLURM_INVALID_ACCOUNT + ESLURM_INVALID_PARENT_ACCOUNT + ESLURM_SAME_PARENT_ACCOUNT + ESLURM_INVALID_LICENSES + ESLURM_NEED_RESTART + ESLURM_ACCOUNTING_POLICY + ESLURM_INVALID_TIME_LIMIT + ESLURM_RESERVATION_ACCESS + ESLURM_RESERVATION_INVALID + ESLURM_INVALID_TIME_VALUE + ESLURM_RESERVATION_BUSY + ESLURM_RESERVATION_NOT_USABLE + ESLURM_INVALID_WCKEY + ESLURM_RESERVATION_OVERLAP + ESLURM_PORTS_BUSY + ESLURM_PORTS_INVALID + ESLURM_PROLOG_RUNNING + ESLURM_NO_STEPS + ESLURM_INVALID_QOS + ESLURM_QOS_PREEMPTION_LOOP + ESLURM_NODE_NOT_AVAIL + ESLURM_INVALID_CPU_COUNT + ESLURM_PARTITION_NOT_AVAIL + ESLURM_CIRCULAR_DEPENDENCY + ESLURM_INVALID_GRES + ESLURM_JOB_NOT_PENDING + ESLURM_QOS_THRES + ESLURM_PARTITION_IN_USE + ESLURM_STEP_LIMIT + ESLURM_JOB_SUSPENDED + ESLURM_CAN_NOT_START_IMMEDIATELY + ESLURM_INTERCONNECT_BUSY + ESLURM_RESERVATION_EMPTY + ESLURM_INVALID_ARRAY + ESLURM_RESERVATION_NAME_DUP + ESLURM_JOB_STARTED + ESLURM_JOB_FINISHED + ESLURM_JOB_NOT_RUNNING + ESLURM_JOB_NOT_PENDING_NOR_RUNNING + ESLURM_JOB_NOT_SUSPENDED + ESLURM_JOB_NOT_FINISHED + ESLURM_TRIGGER_DUP + ESLURM_INTERNAL + ESLURM_INVALID_BURST_BUFFER_CHANGE + ESLURM_BURST_BUFFER_PERMISSION + ESLURM_BURST_BUFFER_LIMIT + ESLURM_INVALID_BURST_BUFFER_REQUEST + ESLURM_PRIO_RESET_FAIL + ESLURM_CANNOT_MODIFY_CRON_JOB + ESLURM_INVALID_JOB_CONTAINER_CHANGE + ESLURM_CANNOT_CANCEL_CRON_JOB + ESLURM_INVALID_MCS_LABEL + ESLURM_BURST_BUFFER_WAIT + ESLURM_PARTITION_DOWN + ESLURM_DUPLICATE_GRES + ESLURM_JOB_SETTING_DB_INX + ESLURM_RSV_ALREADY_STARTED + ESLURM_SUBMISSIONS_DISABLED + ESLURM_NOT_HET_JOB + 
ESLURM_NOT_HET_JOB_LEADER + ESLURM_NOT_WHOLE_HET_JOB + ESLURM_CORE_RESERVATION_UPDATE + ESLURM_DUPLICATE_STEP_ID + ESLURM_INVALID_CORE_CNT + ESLURM_X11_NOT_AVAIL + ESLURM_GROUP_ID_MISSING + ESLURM_BATCH_CONSTRAINT + ESLURM_INVALID_TRES + ESLURM_INVALID_TRES_BILLING_WEIGHTS + ESLURM_INVALID_JOB_DEFAULTS + ESLURM_RESERVATION_MAINT + ESLURM_INVALID_GRES_TYPE + ESLURM_REBOOT_IN_PROGRESS + ESLURM_MULTI_KNL_CONSTRAINT + ESLURM_UNSUPPORTED_GRES + ESLURM_INVALID_NICE + ESLURM_INVALID_TIME_MIN_LIMIT + ESLURM_DEFER + ESLURM_CONFIGLESS_DISABLED + ESLURM_ENVIRONMENT_MISSING + ESLURM_RESERVATION_NO_SKIP + ESLURM_RESERVATION_USER_GROUP + ESLURM_PARTITION_ASSOC + ESLURM_IN_STANDBY_USE_BACKUP + ESLURM_BAD_THREAD_PER_CORE + ESLURM_INVALID_PREFER + ESLURM_INSUFFICIENT_GRES + ESLURM_INVALID_CONTAINER_ID + ESLURM_EMPTY_JOB_ID + ESLURM_INVALID_JOB_ID_ZERO + ESLURM_INVALID_JOB_ID_NEGATIVE + ESLURM_INVALID_JOB_ID_TOO_LARGE + ESLURM_INVALID_JOB_ID_NON_NUMERIC + ESLURM_EMPTY_JOB_ARRAY_ID + ESLURM_INVALID_JOB_ARRAY_ID_NEGATIVE + ESLURM_INVALID_JOB_ARRAY_ID_TOO_LARGE + ESLURM_INVALID_JOB_ARRAY_ID_NON_NUMERIC + ESLURM_INVALID_HET_JOB_AND_ARRAY + ESLURM_EMPTY_HET_JOB_COMP + ESLURM_INVALID_HET_JOB_COMP_NEGATIVE + ESLURM_INVALID_HET_JOB_COMP_TOO_LARGE + ESLURM_INVALID_HET_JOB_COMP_NON_NUMERIC + ESLURM_EMPTY_STEP_ID + ESLURM_INVALID_STEP_ID_NEGATIVE + ESLURM_INVALID_STEP_ID_TOO_LARGE + ESLURM_INVALID_STEP_ID_NON_NUMERIC + ESLURM_EMPTY_HET_STEP + ESLURM_INVALID_HET_STEP_ZERO + ESLURM_INVALID_HET_STEP_NEGATIVE + ESLURM_INVALID_HET_STEP_TOO_LARGE + ESLURM_INVALID_HET_STEP_NON_NUMERIC + ESLURM_INVALID_HET_STEP_JOB + ESLURM_JOB_TIMEOUT_KILLED + ESLURM_JOB_NODE_FAIL_KILLED + ESPANK_ERROR + ESPANK_BAD_ARG + ESPANK_NOT_TASK + ESPANK_ENV_EXISTS + ESPANK_ENV_NOEXIST + ESPANK_NOSPACE + ESPANK_NOT_REMOTE + ESPANK_NOEXIST + ESPANK_NOT_EXECD + ESPANK_NOT_AVAIL + ESPANK_NOT_LOCAL + ESLURMD_KILL_TASK_FAILED + ESLURMD_KILL_JOB_ALREADY_COMPLETE + ESLURMD_INVALID_ACCT_FREQ + ESLURMD_INVALID_JOB_CREDENTIAL + 
ESLURMD_CREDENTIAL_EXPIRED + ESLURMD_CREDENTIAL_REVOKED + ESLURMD_CREDENTIAL_REPLAYED + ESLURMD_CREATE_BATCH_DIR_ERROR + ESLURMD_SETUP_ENVIRONMENT_ERROR + ESLURMD_SET_UID_OR_GID_ERROR + ESLURMD_EXECVE_FAILED + ESLURMD_IO_ERROR + ESLURMD_PROLOG_FAILED + ESLURMD_EPILOG_FAILED + ESLURMD_TOOMANYSTEPS + ESLURMD_STEP_EXISTS + ESLURMD_JOB_NOTRUNNING + ESLURMD_STEP_SUSPENDED + ESLURMD_STEP_NOTSUSPENDED + ESLURMD_INVALID_SOCKET_NAME_LEN + ESLURMD_CONTAINER_RUNTIME_INVALID + ESLURMD_CPU_BIND_ERROR + ESLURMD_CPU_LAYOUT_ERROR + ESLURM_PROTOCOL_INCOMPLETE_PACKET + SLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT + SLURM_PROTOCOL_SOCKET_ZERO_BYTES_SENT + ESLURM_AUTH_CRED_INVALID + ESLURM_AUTH_BADARG + ESLURM_AUTH_UNPACK + ESLURM_AUTH_SKIP + ESLURM_AUTH_UNABLE_TO_GENERATE_TOKEN + ESLURM_DB_CONNECTION + ESLURM_JOBS_RUNNING_ON_ASSOC + ESLURM_CLUSTER_DELETED + ESLURM_ONE_CHANGE + ESLURM_BAD_NAME + ESLURM_OVER_ALLOCATE + ESLURM_RESULT_TOO_LARGE + ESLURM_DB_QUERY_TOO_WIDE + ESLURM_DB_CONNECTION_INVALID + ESLURM_NO_REMOVE_DEFAULT_ACCOUNT + ESLURM_FED_CLUSTER_MAX_CNT + ESLURM_FED_CLUSTER_MULTIPLE_ASSIGNMENT + ESLURM_INVALID_CLUSTER_FEATURE + ESLURM_JOB_NOT_FEDERATED + ESLURM_INVALID_CLUSTER_NAME + ESLURM_FED_JOB_LOCK + ESLURM_FED_NO_VALID_CLUSTERS + ESLURM_MISSING_TIME_LIMIT + ESLURM_INVALID_KNL + ESLURM_PLUGIN_INVALID + ESLURM_PLUGIN_INCOMPLETE + ESLURM_PLUGIN_NOT_LOADED + ESLURM_REST_INVALID_QUERY + ESLURM_REST_FAIL_PARSING + ESLURM_REST_INVALID_JOBS_DESC + ESLURM_REST_EMPTY_RESULT + ESLURM_REST_MISSING_UID + ESLURM_REST_MISSING_GID + ESLURM_DATA_PATH_NOT_FOUND + ESLURM_DATA_PTR_NULL + ESLURM_DATA_CONV_FAILED + ESLURM_DATA_REGEX_COMPILE + ESLURM_DATA_UNKNOWN_MIME_TYPE + ESLURM_DATA_TOO_LARGE + ESLURM_DATA_FLAGS_INVALID_TYPE + ESLURM_DATA_FLAGS_INVALID + ESLURM_DATA_EXPECTED_LIST + ESLURM_DATA_EXPECTED_DICT + ESLURM_DATA_AMBIGUOUS_MODIFY + ESLURM_DATA_AMBIGUOUS_QUERY + ESLURM_DATA_PARSE_NOTHING + ESLURM_CONTAINER_NOT_CONFIGURED + + ctypedef struct slurm_errtab_t: + int xe_number + char* xe_name + 
char* xe_message + + slurm_errtab_t slurm_errtab[] + + unsigned int slurm_errtab_size + + char* slurm_strerror(int errnum) + + void slurm_seterrno(int errnum) + + int slurm_get_errno() + + void slurm_perror(const char* msg) diff --git a/pyslurm/slurm/slurm_version.h.pxi b/pyslurm/slurm/slurm_version.h.pxi new file mode 100644 index 00000000..a7710f93 --- /dev/null +++ b/pyslurm/slurm/slurm_version.h.pxi @@ -0,0 +1,21 @@ +# Copyright (C) 2023 The PySlurm Authors +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +cdef extern from "slurm/slurm_version.h": + + uint32_t SLURM_VERSION_NUMBER diff --git a/pyslurm/slurm/slurmdb.h.pxi b/pyslurm/slurm/slurmdb.h.pxi new file mode 100644 index 00000000..00b22f36 --- /dev/null +++ b/pyslurm/slurm/slurmdb.h.pxi @@ -0,0 +1,1192 @@ +############################################################################## +# NOTICE: This File has been generated by scripts/pyslurm_bindgen.py, which +# uses the autopxd2 tool in order to generate Cython compatible definitions +# from the slurmdb.h C-Header file. Basically, this can be seen as a modified +# version of the original header, with the following changes: +# +# * have the correct cython syntax for type definitions, e.g. 
"typedef struct +# " is converted to "ctypedef struct " +# * C-Macros are listed with their appropriate uint type +# * Any definitions that cannot be translated are not included in this file +# +# Generated on 2023-04-30T11:54:32.267784 +# +# The Original Copyright notice from slurmdb.h has been included +# below: +# +############################################################################## +# slurmdb.h - Interface codes and functions for slurm +############################################################################## +# Copyright (C) 2010 Lawrence Livermore National Security. +# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). +# Written by Danny Auble da@llnl.gov, et. al. +# CODE-OCEC-09-009. All rights reserved. +# +# Please also check the DISCLAIMER file in the Slurm repository here: +# https://github.com/SchedMD/slurm/blob/master/DISCLAIMER +############################################################################## +# +# Copyright (C) 2023 The PySlurm Authors (Modifications as described above) +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +cdef extern from "slurm/slurmdb.h": + + uint32_t QOS_FLAG_BASE + uint32_t QOS_FLAG_NOTSET + uint32_t QOS_FLAG_ADD + uint32_t QOS_FLAG_REMOVE + uint8_t QOS_FLAG_PART_MIN_NODE + uint8_t QOS_FLAG_PART_MAX_NODE + uint8_t QOS_FLAG_PART_TIME_LIMIT + uint8_t QOS_FLAG_ENFORCE_USAGE_THRES + uint8_t QOS_FLAG_NO_RESERVE + uint8_t QOS_FLAG_REQ_RESV + uint8_t QOS_FLAG_DENY_LIMIT + uint8_t QOS_FLAG_OVER_PART_QOS + uint16_t QOS_FLAG_NO_DECAY + uint16_t QOS_FLAG_USAGE_FACTOR_SAFE + uint32_t SLURMDB_RES_FLAG_BASE + uint32_t SLURMDB_RES_FLAG_NOTSET + uint32_t SLURMDB_RES_FLAG_ADD + uint32_t SLURMDB_RES_FLAG_REMOVE + uint8_t SLURMDB_RES_FLAG_ABSOLUTE + uint32_t FEDERATION_FLAG_BASE + uint32_t FEDERATION_FLAG_NOTSET + uint32_t FEDERATION_FLAG_ADD + uint32_t FEDERATION_FLAG_REMOVE + uint8_t CLUSTER_FED_STATE_BASE + uint16_t CLUSTER_FED_STATE_FLAGS + uint8_t CLUSTER_FED_STATE_DRAIN + uint8_t CLUSTER_FED_STATE_REMOVE + uint8_t SLURMDB_JOB_FLAG_NONE + uint8_t SLURMDB_JOB_CLEAR_SCHED + uint8_t SLURMDB_JOB_FLAG_NOTSET + uint8_t SLURMDB_JOB_FLAG_SUBMIT + uint8_t SLURMDB_JOB_FLAG_SCHED + uint8_t SLURMDB_JOB_FLAG_BACKFILL + uint8_t SLURMDB_JOB_FLAG_START_R + uint8_t JOBCOND_FLAG_DUP + uint8_t JOBCOND_FLAG_NO_STEP + uint8_t JOBCOND_FLAG_NO_TRUNC + uint8_t JOBCOND_FLAG_RUNAWAY + uint8_t JOBCOND_FLAG_WHOLE_HETJOB + uint8_t JOBCOND_FLAG_NO_WHOLE_HETJOB + uint8_t JOBCOND_FLAG_NO_WAIT + uint8_t JOBCOND_FLAG_NO_DEFAULT_USAGE + uint16_t JOBCOND_FLAG_SCRIPT + uint16_t JOBCOND_FLAG_ENV + uint16_t SLURMDB_PURGE_BASE + uint32_t SLURMDB_PURGE_FLAGS + uint32_t SLURMDB_PURGE_HOURS + uint32_t SLURMDB_PURGE_DAYS + uint32_t SLURMDB_PURGE_MONTHS + uint32_t SLURMDB_PURGE_ARCHIVE + uint32_t SLURMDB_FS_USE_PARENT + uint16_t SLURMDB_CLASSIFIED_FLAG + uint8_t SLURMDB_CLASS_BASE + uint8_t CLUSTER_FLAG_REGISTER + uint8_t CLUSTER_FLAG_A2 + uint8_t CLUSTER_FLAG_A3 + uint8_t CLUSTER_FLAG_A4 + uint8_t CLUSTER_FLAG_A5 + uint8_t CLUSTER_FLAG_A6 + uint8_t CLUSTER_FLAG_A7 + uint8_t CLUSTER_FLAG_MULTSD + uint16_t 
CLUSTER_FLAG_A9 + uint16_t CLUSTER_FLAG_FE + uint16_t CLUSTER_FLAG_CRAY + uint16_t CLUSTER_FLAG_FED + uint16_t CLUSTER_FLAG_EXT + uint8_t ASSOC_FLAG_DELETED + uint8_t ASSOC_FLAG_NO_UPDATE + uint8_t SLURMDB_EVENT_COND_OPEN + uint8_t DB_CONN_FLAG_CLUSTER_DEL + uint8_t DB_CONN_FLAG_ROLLBACK + +cdef extern from "slurm/slurmdb.h": + + ctypedef enum slurmdb_admin_level_t: + SLURMDB_ADMIN_NOTSET + SLURMDB_ADMIN_NONE + SLURMDB_ADMIN_OPERATOR + SLURMDB_ADMIN_SUPER_USER + + ctypedef enum slurmdb_classification_type_t: + SLURMDB_CLASS_NONE + SLURMDB_CLASS_CAPABILITY + SLURMDB_CLASS_CAPACITY + SLURMDB_CLASS_CAPAPACITY + + ctypedef enum slurmdb_event_type_t: + SLURMDB_EVENT_ALL + SLURMDB_EVENT_CLUSTER + SLURMDB_EVENT_NODE + + ctypedef enum slurmdb_problem_type_t: + SLURMDB_PROBLEM_NOT_SET + SLURMDB_PROBLEM_ACCT_NO_ASSOC + SLURMDB_PROBLEM_ACCT_NO_USERS + SLURMDB_PROBLEM_USER_NO_ASSOC + SLURMDB_PROBLEM_USER_NO_UID + + ctypedef enum slurmdb_report_sort_t: + SLURMDB_REPORT_SORT_TIME + SLURMDB_REPORT_SORT_NAME + + ctypedef enum slurmdb_report_time_format_t: + SLURMDB_REPORT_TIME_SECS + SLURMDB_REPORT_TIME_MINS + SLURMDB_REPORT_TIME_HOURS + SLURMDB_REPORT_TIME_PERCENT + SLURMDB_REPORT_TIME_SECS_PER + SLURMDB_REPORT_TIME_MINS_PER + SLURMDB_REPORT_TIME_HOURS_PER + + ctypedef enum slurmdb_resource_type_t: + SLURMDB_RESOURCE_NOTSET + SLURMDB_RESOURCE_LICENSE + + ctypedef enum slurmdb_update_type_t: + SLURMDB_UPDATE_NOTSET + SLURMDB_ADD_USER + SLURMDB_ADD_ASSOC + SLURMDB_ADD_COORD + SLURMDB_MODIFY_USER + SLURMDB_MODIFY_ASSOC + SLURMDB_REMOVE_USER + SLURMDB_REMOVE_ASSOC + SLURMDB_REMOVE_COORD + SLURMDB_ADD_QOS + SLURMDB_REMOVE_QOS + SLURMDB_MODIFY_QOS + SLURMDB_ADD_WCKEY + SLURMDB_REMOVE_WCKEY + SLURMDB_MODIFY_WCKEY + SLURMDB_ADD_CLUSTER + SLURMDB_REMOVE_CLUSTER + SLURMDB_REMOVE_ASSOC_USAGE + SLURMDB_ADD_RES + SLURMDB_REMOVE_RES + SLURMDB_MODIFY_RES + SLURMDB_REMOVE_QOS_USAGE + SLURMDB_ADD_TRES + SLURMDB_UPDATE_FEDS + + cdef enum cluster_fed_states: + CLUSTER_FED_STATE_NA + 
CLUSTER_FED_STATE_ACTIVE + CLUSTER_FED_STATE_INACTIVE + + ctypedef struct slurmdb_tres_rec_t: + uint64_t alloc_secs + uint32_t rec_count + uint64_t count + uint32_t id + char* name + char* type + + ctypedef struct slurmdb_assoc_cond_t: + List acct_list + List cluster_list + List def_qos_id_list + List format_list + List id_list + uint16_t only_defs + List parent_acct_list + List partition_list + List qos_list + time_t usage_end + time_t usage_start + List user_list + uint16_t with_usage + uint16_t with_deleted + uint16_t with_raw_qos + uint16_t with_sub_accts + uint16_t without_parent_info + uint16_t without_parent_limits + + ctypedef struct slurmdb_job_cond_t: + List acct_list + List associd_list + List cluster_list + List constraint_list + uint32_t cpus_max + uint32_t cpus_min + uint32_t db_flags + int32_t exitcode + uint32_t flags + List format_list + List groupid_list + List jobname_list + uint32_t nodes_max + uint32_t nodes_min + List partition_list + List qos_list + List reason_list + List resv_list + List resvid_list + List state_list + List step_list + uint32_t timelimit_max + uint32_t timelimit_min + time_t usage_end + time_t usage_start + char* used_nodes + List userid_list + List wckey_list + + ctypedef struct slurmdb_stats_t: + double act_cpufreq + uint64_t consumed_energy + char* tres_usage_in_ave + char* tres_usage_in_max + char* tres_usage_in_max_nodeid + char* tres_usage_in_max_taskid + char* tres_usage_in_min + char* tres_usage_in_min_nodeid + char* tres_usage_in_min_taskid + char* tres_usage_in_tot + char* tres_usage_out_ave + char* tres_usage_out_max + char* tres_usage_out_max_nodeid + char* tres_usage_out_max_taskid + char* tres_usage_out_min + char* tres_usage_out_min_nodeid + char* tres_usage_out_min_taskid + char* tres_usage_out_tot + + ctypedef struct slurmdb_account_cond_t: + slurmdb_assoc_cond_t* assoc_cond + List description_list + List organization_list + uint16_t with_assocs + uint16_t with_coords + uint16_t with_deleted + + cdef enum: 
+ SLURMDB_ACCT_FLAG_NONE + SLURMDB_ACCT_FLAG_DELETED + + ctypedef struct slurmdb_account_rec_t: + List assoc_list + List coordinators + char* description + uint32_t flags + char* name + char* organization + + ctypedef struct slurmdb_accounting_rec_t: + uint64_t alloc_secs + uint32_t id + time_t period_start + slurmdb_tres_rec_t tres_rec + + ctypedef struct slurmdb_archive_cond_t: + char* archive_dir + char* archive_script + slurmdb_job_cond_t* job_cond + uint32_t purge_event + uint32_t purge_job + uint32_t purge_resv + uint32_t purge_step + uint32_t purge_suspend + uint32_t purge_txn + uint32_t purge_usage + + ctypedef struct slurmdb_archive_rec_t: + char* archive_file + char* insert + + ctypedef struct slurmdb_tres_cond_t: + uint64_t count + List format_list + List id_list + List name_list + List type_list + uint16_t with_deleted + + ctypedef slurmdb_assoc_usage slurmdb_assoc_usage_t + + ctypedef slurmdb_bf_usage slurmdb_bf_usage_t + + ctypedef slurmdb_user_rec slurmdb_user_rec_t + + cdef struct slurmdb_assoc_rec: + List accounting_list + char* acct + slurmdb_assoc_rec* assoc_next + slurmdb_assoc_rec* assoc_next_id + slurmdb_bf_usage_t* bf_usage + char* cluster + char* comment + uint32_t def_qos_id + uint16_t flags + uint32_t grp_jobs + uint32_t grp_jobs_accrue + uint32_t grp_submit_jobs + char* grp_tres + uint64_t* grp_tres_ctld + char* grp_tres_mins + uint64_t* grp_tres_mins_ctld + char* grp_tres_run_mins + uint64_t* grp_tres_run_mins_ctld + uint32_t grp_wall + uint32_t id + uint16_t is_def + slurmdb_assoc_usage_t* leaf_usage + uint32_t lft + uint32_t max_jobs + uint32_t max_jobs_accrue + uint32_t max_submit_jobs + char* max_tres_mins_pj + uint64_t* max_tres_mins_ctld + char* max_tres_run_mins + uint64_t* max_tres_run_mins_ctld + char* max_tres_pj + uint64_t* max_tres_ctld + char* max_tres_pn + uint64_t* max_tres_pn_ctld + uint32_t max_wall_pj + uint32_t min_prio_thresh + char* parent_acct + uint32_t parent_id + char* partition + uint32_t priority + List 
qos_list + uint32_t rgt + uint32_t shares_raw + uint32_t uid + slurmdb_assoc_usage_t* usage + char* user + slurmdb_user_rec_t* user_rec + + ctypedef slurmdb_assoc_rec slurmdb_assoc_rec_t + + cdef struct slurmdb_assoc_usage: + uint32_t accrue_cnt + List children_list + bitstr_t* grp_node_bitmap + uint16_t* grp_node_job_cnt + uint64_t* grp_used_tres + uint64_t* grp_used_tres_run_secs + double grp_used_wall + double fs_factor + uint32_t level_shares + slurmdb_assoc_rec_t* parent_assoc_ptr + double priority_norm + slurmdb_assoc_rec_t* fs_assoc_ptr + double shares_norm + uint32_t tres_cnt + long double usage_efctv + long double usage_norm + long double usage_raw + long double* usage_tres_raw + uint32_t used_jobs + uint32_t used_submit_jobs + long double level_fs + bitstr_t* valid_qos + + cdef struct slurmdb_bf_usage: + uint64_t count + time_t last_sched + + ctypedef struct slurmdb_cluster_cond_t: + uint16_t classification + List cluster_list + List federation_list + uint32_t flags + List format_list + List plugin_id_select_list + List rpc_version_list + time_t usage_end + time_t usage_start + uint16_t with_deleted + uint16_t with_usage + + ctypedef struct slurmdb_cluster_fed_t: + List feature_list + uint32_t id + char* name + void* recv + void* send + uint32_t state + bool sync_recvd + bool sync_sent + + cdef struct slurmdb_cluster_rec: + List accounting_list + uint16_t classification + time_t comm_fail_time + slurm_addr_t control_addr + char* control_host + uint32_t control_port + uint16_t dimensions + int* dim_size + slurmdb_cluster_fed_t fed + uint32_t flags + pthread_mutex_t lock + char* name + char* nodes + uint32_t plugin_id_select + slurmdb_assoc_rec_t* root_assoc + uint16_t rpc_version + List send_rpc + char* tres_str + + ctypedef struct slurmdb_cluster_accounting_rec_t: + uint64_t alloc_secs + uint64_t down_secs + uint64_t idle_secs + uint64_t over_secs + uint64_t pdown_secs + time_t period_start + uint64_t plan_secs + slurmdb_tres_rec_t tres_rec + + ctypedef 
struct slurmdb_clus_res_rec_t: + char* cluster + uint32_t allowed + + ctypedef struct slurmdb_coord_rec_t: + char* name + uint16_t direct + + ctypedef struct slurmdb_event_cond_t: + List cluster_list + uint32_t cond_flags + uint32_t cpus_max + uint32_t cpus_min + uint16_t event_type + List format_list + char* node_list + time_t period_end + time_t period_start + List reason_list + List reason_uid_list + List state_list + + ctypedef struct slurmdb_event_rec_t: + char* cluster + char* cluster_nodes + uint16_t event_type + char* node_name + time_t period_end + time_t period_start + char* reason + uint32_t reason_uid + uint32_t state + char* tres_str + + ctypedef struct slurmdb_federation_cond_t: + List cluster_list + List federation_list + List format_list + uint16_t with_deleted + + ctypedef struct slurmdb_federation_rec_t: + char* name + uint32_t flags + List cluster_list + + ctypedef struct slurmdb_job_rec_t: + char* account + char* admin_comment + uint32_t alloc_nodes + uint32_t array_job_id + uint32_t array_max_tasks + uint32_t array_task_id + char* array_task_str + uint32_t associd + char* blockid + char* cluster + char* constraints + char* container + uint64_t db_index + uint32_t derived_ec + char* derived_es + uint32_t elapsed + time_t eligible + time_t end + char* env + uint32_t exitcode + char* extra + char* failed_node + uint32_t flags + void* first_step_ptr + uint32_t gid + uint32_t het_job_id + uint32_t het_job_offset + uint32_t jobid + char* jobname + uint32_t lft + char* licenses + char* mcs_label + char* nodes + char* partition + uint32_t priority + uint32_t qosid + uint32_t req_cpus + uint64_t req_mem + uint32_t requid + uint32_t resvid + char* resv_name + char* script + uint32_t show_full + time_t start + uint32_t state + uint32_t state_reason_prev + List steps + time_t submit + char* submit_line + uint32_t suspended + char* system_comment + uint64_t sys_cpu_sec + uint64_t sys_cpu_usec + uint32_t timelimit + uint64_t tot_cpu_sec + uint64_t 
tot_cpu_usec + char* tres_alloc_str + char* tres_req_str + uint32_t uid + char* used_gres + char* user + uint64_t user_cpu_sec + uint64_t user_cpu_usec + char* wckey + uint32_t wckeyid + char* work_dir + + ctypedef struct slurmdb_qos_usage_t: + uint32_t accrue_cnt + List acct_limit_list + List job_list + bitstr_t* grp_node_bitmap + uint16_t* grp_node_job_cnt + uint32_t grp_used_jobs + uint32_t grp_used_submit_jobs + uint64_t* grp_used_tres + uint64_t* grp_used_tres_run_secs + double grp_used_wall + double norm_priority + uint32_t tres_cnt + long double usage_raw + long double* usage_tres_raw + List user_limit_list + + ctypedef struct slurmdb_qos_rec_t: + char* description + uint32_t id + uint32_t flags + uint32_t grace_time + uint32_t grp_jobs_accrue + uint32_t grp_jobs + uint32_t grp_submit_jobs + char* grp_tres + uint64_t* grp_tres_ctld + char* grp_tres_mins + uint64_t* grp_tres_mins_ctld + char* grp_tres_run_mins + uint64_t* grp_tres_run_mins_ctld + uint32_t grp_wall + double limit_factor + uint32_t max_jobs_pa + uint32_t max_jobs_pu + uint32_t max_jobs_accrue_pa + uint32_t max_jobs_accrue_pu + uint32_t max_submit_jobs_pa + uint32_t max_submit_jobs_pu + char* max_tres_mins_pj + uint64_t* max_tres_mins_pj_ctld + char* max_tres_pa + uint64_t* max_tres_pa_ctld + char* max_tres_pj + uint64_t* max_tres_pj_ctld + char* max_tres_pn + uint64_t* max_tres_pn_ctld + char* max_tres_pu + uint64_t* max_tres_pu_ctld + char* max_tres_run_mins_pa + uint64_t* max_tres_run_mins_pa_ctld + char* max_tres_run_mins_pu + uint64_t* max_tres_run_mins_pu_ctld + uint32_t max_wall_pj + uint32_t min_prio_thresh + char* min_tres_pj + uint64_t* min_tres_pj_ctld + char* name + bitstr_t* preempt_bitstr + List preempt_list + uint16_t preempt_mode + uint32_t preempt_exempt_time + uint32_t priority + slurmdb_qos_usage_t* usage + double usage_factor + double usage_thres + time_t blocked_until + + ctypedef struct slurmdb_qos_cond_t: + List description_list + List id_list + List format_list + List 
name_list + uint16_t preempt_mode + uint16_t with_deleted + + ctypedef struct slurmdb_reservation_cond_t: + List cluster_list + uint64_t flags + List format_list + List id_list + List name_list + char* nodes + time_t time_end + time_t time_start + uint16_t with_usage + + ctypedef struct slurmdb_reservation_rec_t: + char* assocs + char* cluster + char* comment + uint64_t flags + uint32_t id + char* name + char* nodes + char* node_inx + time_t time_end + time_t time_start + time_t time_start_prev + char* tres_str + double unused_wall + List tres_list + + ctypedef struct slurmdb_step_rec_t: + char* container + uint32_t elapsed + time_t end + int32_t exitcode + slurmdb_job_rec_t* job_ptr + uint32_t nnodes + char* nodes + uint32_t ntasks + char* pid_str + uint32_t req_cpufreq_min + uint32_t req_cpufreq_max + uint32_t req_cpufreq_gov + uint32_t requid + time_t start + uint32_t state + slurmdb_stats_t stats + slurm_step_id_t step_id + char* stepname + char* submit_line + uint32_t suspended + uint64_t sys_cpu_sec + uint32_t sys_cpu_usec + uint32_t task_dist + uint64_t tot_cpu_sec + uint32_t tot_cpu_usec + char* tres_alloc_str + uint64_t user_cpu_sec + uint32_t user_cpu_usec + + ctypedef struct slurmdb_res_cond_t: + list_t* allowed_list + List cluster_list + List description_list + uint32_t flags + List format_list + List id_list + List manager_list + List name_list + List server_list + List type_list + uint16_t with_deleted + uint16_t with_clusters + + ctypedef struct slurmdb_res_rec_t: + uint32_t allocated + uint32_t last_consumed + List clus_res_list + slurmdb_clus_res_rec_t* clus_res_rec + uint32_t count + char* description + uint32_t flags + uint32_t id + time_t last_update + char* manager + char* name + char* server + uint32_t type + + ctypedef struct slurmdb_txn_cond_t: + List acct_list + List action_list + List actor_list + List cluster_list + List format_list + List id_list + List info_list + List name_list + time_t time_end + time_t time_start + List user_list + 
uint16_t with_assoc_info + + ctypedef struct slurmdb_txn_rec_t: + char* accts + uint16_t action + char* actor_name + char* clusters + uint32_t id + char* set_info + time_t timestamp + char* users + char* where_query + + ctypedef struct slurmdb_used_limits_t: + uint32_t accrue_cnt + char* acct + uint32_t jobs + uint32_t submit_jobs + uint64_t* tres + uint64_t* tres_run_mins + bitstr_t* node_bitmap + uint16_t* node_job_cnt + uint32_t uid + + ctypedef struct slurmdb_user_cond_t: + uint16_t admin_level + slurmdb_assoc_cond_t* assoc_cond + List def_acct_list + List def_wckey_list + uint16_t with_assocs + uint16_t with_coords + uint16_t with_deleted + uint16_t with_wckeys + uint16_t without_defaults + + cdef enum: + SLURMDB_USER_FLAG_NONE + SLURMDB_USER_FLAG_DELETED + + cdef struct slurmdb_user_rec: + uint16_t admin_level + List assoc_list + slurmdb_bf_usage_t* bf_usage + List coord_accts + char* default_acct + char* default_wckey + uint32_t flags + char* name + char* old_name + uint32_t uid + List wckey_list + + ctypedef struct slurmdb_update_object_t: + List objects + uint16_t type + + ctypedef struct slurmdb_wckey_cond_t: + List cluster_list + List format_list + List id_list + List name_list + uint16_t only_defs + time_t usage_end + time_t usage_start + List user_list + uint16_t with_usage + uint16_t with_deleted + + cdef enum: + SLURMDB_WCKEY_FLAG_NONE + SLURMDB_WCKEY_FLAG_DELETED + + ctypedef struct slurmdb_wckey_rec_t: + List accounting_list + char* cluster + uint32_t flags + uint32_t id + uint16_t is_def + char* name + uint32_t uid + char* user + + ctypedef struct slurmdb_print_tree_t: + char* name + char* print_name + char* spaces + uint16_t user + + ctypedef struct slurmdb_hierarchical_rec_t: + slurmdb_assoc_rec_t* assoc + char* key + char* sort_name + List children + + ctypedef struct slurmdb_report_assoc_rec_t: + char* acct + char* cluster + char* parent_acct + List tres_list + char* user + + ctypedef struct slurmdb_report_user_rec_t: + char* acct + List 
acct_list + List assoc_list + char* name + List tres_list + uid_t uid + + ctypedef struct slurmdb_report_cluster_rec_t: + List accounting_list + List assoc_list + char* name + List tres_list + List user_list + + ctypedef struct slurmdb_report_job_grouping_t: + uint32_t count + List jobs + uint32_t min_size + uint32_t max_size + List tres_list + + ctypedef struct slurmdb_report_acct_grouping_t: + char* acct + uint32_t count + List groups + uint32_t lft + uint32_t rgt + List tres_list + + ctypedef struct slurmdb_report_cluster_grouping_t: + List acct_list + char* cluster + uint32_t count + List tres_list + + cdef enum: + DBD_ROLLUP_HOUR + DBD_ROLLUP_DAY + DBD_ROLLUP_MONTH + DBD_ROLLUP_COUNT + + ctypedef struct slurmdb_rollup_stats_t: + char* cluster_name + uint16_t count[4] + time_t timestamp[4] + uint64_t time_last[4] + uint64_t time_max[4] + uint64_t time_total[4] + + ctypedef struct slurmdb_rpc_obj_t: + uint32_t cnt + uint32_t id + uint64_t time + uint64_t time_ave + + ctypedef struct slurmdb_stats_rec_t: + slurmdb_rollup_stats_t* dbd_rollup_stats + List rollup_stats + List rpc_list + time_t time_start + List user_list + + slurmdb_cluster_rec_t* working_cluster_rec + + int slurmdb_accounts_add(void* db_conn, List acct_list) + + List slurmdb_accounts_get(void* db_conn, slurmdb_account_cond_t* acct_cond) + + List slurmdb_accounts_modify(void* db_conn, slurmdb_account_cond_t* acct_cond, slurmdb_account_rec_t* acct) + + List slurmdb_accounts_remove(void* db_conn, slurmdb_account_cond_t* acct_cond) + + int slurmdb_archive(void* db_conn, slurmdb_archive_cond_t* arch_cond) + + int slurmdb_archive_load(void* db_conn, slurmdb_archive_rec_t* arch_rec) + + int slurmdb_associations_add(void* db_conn, List assoc_list) + + List slurmdb_associations_get(void* db_conn, slurmdb_assoc_cond_t* assoc_cond) + + List slurmdb_associations_modify(void* db_conn, slurmdb_assoc_cond_t* assoc_cond, slurmdb_assoc_rec_t* assoc) + + List slurmdb_associations_remove(void* db_conn, 
slurmdb_assoc_cond_t* assoc_cond) + + int slurmdb_clusters_add(void* db_conn, List cluster_list) + + List slurmdb_clusters_get(void* db_conn, slurmdb_cluster_cond_t* cluster_cond) + + List slurmdb_clusters_modify(void* db_conn, slurmdb_cluster_cond_t* cluster_cond, slurmdb_cluster_rec_t* cluster) + + List slurmdb_clusters_remove(void* db_conn, slurmdb_cluster_cond_t* cluster_cond) + + List slurmdb_report_cluster_account_by_user(void* db_conn, slurmdb_assoc_cond_t* assoc_cond) + + List slurmdb_report_cluster_user_by_account(void* db_conn, slurmdb_assoc_cond_t* assoc_cond) + + List slurmdb_report_cluster_wckey_by_user(void* db_conn, slurmdb_wckey_cond_t* wckey_cond) + + List slurmdb_report_cluster_user_by_wckey(void* db_conn, slurmdb_wckey_cond_t* wckey_cond) + + List slurmdb_report_job_sizes_grouped_by_account(void* db_conn, slurmdb_job_cond_t* job_cond, List grouping_list, bool flat_view, bool acct_as_parent) + + List slurmdb_report_job_sizes_grouped_by_wckey(void* db_conn, slurmdb_job_cond_t* job_cond, List grouping_list) + + List slurmdb_report_job_sizes_grouped_by_account_then_wckey(void* db_conn, slurmdb_job_cond_t* job_cond, List grouping_list, bool flat_view, bool acct_as_parent) + + List slurmdb_report_user_top_usage(void* db_conn, slurmdb_user_cond_t* user_cond, bool group_accounts) + + void* slurmdb_connection_get(uint16_t* persist_conn_flags) + + int slurmdb_connection_close(void** db_conn) + + int slurmdb_connection_commit(void* db_conn, bool commit) + + int slurmdb_coord_add(void* db_conn, List acct_list, slurmdb_user_cond_t* user_cond) + + List slurmdb_coord_remove(void* db_conn, List acct_list, slurmdb_user_cond_t* user_cond) + + int slurmdb_federations_add(void* db_conn, List federation_list) + + List slurmdb_federations_modify(void* db_conn, slurmdb_federation_cond_t* fed_cond, slurmdb_federation_rec_t* fed) + + List slurmdb_federations_remove(void* db_conn, slurmdb_federation_cond_t* fed_cond) + + List slurmdb_federations_get(void* db_conn, 
slurmdb_federation_cond_t* fed_cond) + + List slurmdb_job_modify(void* db_conn, slurmdb_job_cond_t* job_cond, slurmdb_job_rec_t* job) + + List slurmdb_jobs_get(void* db_conn, slurmdb_job_cond_t* job_cond) + + int slurmdb_jobs_fix_runaway(void* db_conn, List jobs) + + int slurmdb_jobcomp_init() + + int slurmdb_jobcomp_fini() + + List slurmdb_jobcomp_jobs_get(slurmdb_job_cond_t* job_cond) + + int slurmdb_reconfig(void* db_conn) + + int slurmdb_shutdown(void* db_conn) + + int slurmdb_clear_stats(void* db_conn) + + int slurmdb_get_stats(void* db_conn, slurmdb_stats_rec_t** stats_pptr) + + List slurmdb_config_get(void* db_conn) + + List slurmdb_events_get(void* db_conn, slurmdb_event_cond_t* event_cond) + + List slurmdb_problems_get(void* db_conn, slurmdb_assoc_cond_t* assoc_cond) + + List slurmdb_reservations_get(void* db_conn, slurmdb_reservation_cond_t* resv_cond) + + List slurmdb_txn_get(void* db_conn, slurmdb_txn_cond_t* txn_cond) + + List slurmdb_get_info_cluster(char* cluster_names) + + int slurmdb_get_first_avail_cluster(job_desc_msg_t* req, char* cluster_names, slurmdb_cluster_rec_t** cluster_rec) + + int slurmdb_get_first_het_job_cluster(List job_req_list, char* cluster_names, slurmdb_cluster_rec_t** cluster_rec) + + void slurmdb_destroy_assoc_usage(void* object) + + void slurmdb_destroy_bf_usage(void* object) + + void slurmdb_destroy_bf_usage_members(void* object) + + void slurmdb_destroy_qos_usage(void* object) + + void slurmdb_destroy_user_rec(void* object) + + void slurmdb_destroy_account_rec(void* object) + + void slurmdb_destroy_coord_rec(void* object) + + void slurmdb_destroy_clus_res_rec(void* object) + + void slurmdb_destroy_cluster_accounting_rec(void* object) + + void slurmdb_destroy_cluster_rec(void* object) + + void slurmdb_destroy_federation_rec(void* object) + + void slurmdb_destroy_accounting_rec(void* object) + + void slurmdb_free_assoc_mgr_state_msg(void* object) + + void slurmdb_free_assoc_rec_members(slurmdb_assoc_rec_t* assoc) + + void 
slurmdb_destroy_assoc_rec(void* object) + + void slurmdb_destroy_event_rec(void* object) + + void slurmdb_destroy_job_rec(void* object) + + void slurmdb_free_qos_rec_members(slurmdb_qos_rec_t* qos) + + void slurmdb_destroy_qos_rec(void* object) + + void slurmdb_destroy_reservation_rec(void* object) + + void slurmdb_destroy_step_rec(void* object) + + void slurmdb_destroy_res_rec(void* object) + + void slurmdb_destroy_txn_rec(void* object) + + void slurmdb_destroy_wckey_rec(void* object) + + void slurmdb_destroy_archive_rec(void* object) + + void slurmdb_destroy_tres_rec_noalloc(void* object) + + void slurmdb_destroy_tres_rec(void* object) + + void slurmdb_destroy_report_assoc_rec(void* object) + + void slurmdb_destroy_report_user_rec(void* object) + + void slurmdb_destroy_report_cluster_rec(void* object) + + void slurmdb_destroy_user_cond(void* object) + + void slurmdb_destroy_account_cond(void* object) + + void slurmdb_destroy_cluster_cond(void* object) + + void slurmdb_destroy_federation_cond(void* object) + + void slurmdb_destroy_tres_cond(void* object) + + void slurmdb_destroy_assoc_cond(void* object) + + void slurmdb_destroy_event_cond(void* object) + + void slurmdb_destroy_job_cond(void* object) + + void slurmdb_destroy_qos_cond(void* object) + + void slurmdb_destroy_reservation_cond(void* object) + + void slurmdb_destroy_res_cond(void* object) + + void slurmdb_destroy_txn_cond(void* object) + + void slurmdb_destroy_wckey_cond(void* object) + + void slurmdb_destroy_archive_cond(void* object) + + void slurmdb_destroy_update_object(void* object) + + void slurmdb_destroy_used_limits(void* object) + + void slurmdb_destroy_print_tree(void* object) + + void slurmdb_destroy_hierarchical_rec(void* object) + + void slurmdb_destroy_report_job_grouping(void* object) + + void slurmdb_destroy_report_acct_grouping(void* object) + + void slurmdb_destroy_report_cluster_grouping(void* object) + + void slurmdb_destroy_rpc_obj(void* object) + + void 
slurmdb_destroy_rollup_stats(void* object) + + void slurmdb_free_stats_rec_members(void* object) + + void slurmdb_destroy_stats_rec(void* object) + + void slurmdb_free_slurmdb_stats_members(slurmdb_stats_t* stats) + + void slurmdb_destroy_slurmdb_stats(slurmdb_stats_t* stats) + + void slurmdb_init_assoc_rec(slurmdb_assoc_rec_t* assoc, bool free_it) + + void slurmdb_init_clus_res_rec(slurmdb_clus_res_rec_t* clus_res, bool free_it) + + void slurmdb_init_cluster_rec(slurmdb_cluster_rec_t* cluster, bool free_it) + + void slurmdb_init_federation_rec(slurmdb_federation_rec_t* federation, bool free_it) + + void slurmdb_init_qos_rec(slurmdb_qos_rec_t* qos, bool free_it, uint32_t init_val) + + void slurmdb_init_res_rec(slurmdb_res_rec_t* res, bool free_it) + + void slurmdb_init_wckey_rec(slurmdb_wckey_rec_t* wckey, bool free_it) + + void slurmdb_init_tres_cond(slurmdb_tres_cond_t* tres, bool free_it) + + void slurmdb_init_cluster_cond(slurmdb_cluster_cond_t* cluster, bool free_it) + + void slurmdb_init_federation_cond(slurmdb_federation_cond_t* federation, bool free_it) + + void slurmdb_init_res_cond(slurmdb_res_cond_t* cluster, bool free_it) + + List slurmdb_get_hierarchical_sorted_assoc_list(List assoc_list, bool use_lft) + + List slurmdb_get_acct_hierarchical_rec_list(List assoc_list) + + char* slurmdb_tree_name_get(char* name, char* parent, List tree_list) + + int slurmdb_res_add(void* db_conn, List res_list) + + List slurmdb_res_get(void* db_conn, slurmdb_res_cond_t* res_cond) + + List slurmdb_res_modify(void* db_conn, slurmdb_res_cond_t* res_cond, slurmdb_res_rec_t* res) + + List slurmdb_res_remove(void* db_conn, slurmdb_res_cond_t* res_cond) + + int slurmdb_qos_add(void* db_conn, List qos_list) + + List slurmdb_qos_get(void* db_conn, slurmdb_qos_cond_t* qos_cond) + + List slurmdb_qos_modify(void* db_conn, slurmdb_qos_cond_t* qos_cond, slurmdb_qos_rec_t* qos) + + List slurmdb_qos_remove(void* db_conn, slurmdb_qos_cond_t* qos_cond) + + int slurmdb_tres_add(void* 
db_conn, List tres_list) + + List slurmdb_tres_get(void* db_conn, slurmdb_tres_cond_t* tres_cond) + + int slurmdb_usage_get(void* db_conn, void* in_, int type, time_t start, time_t end) + + int slurmdb_usage_roll(void* db_conn, time_t sent_start, time_t sent_end, uint16_t archive_data, List* rollup_stats_list_in) + + int slurmdb_users_add(void* db_conn, List user_list) + + List slurmdb_users_get(void* db_conn, slurmdb_user_cond_t* user_cond) + + List slurmdb_users_modify(void* db_conn, slurmdb_user_cond_t* user_cond, slurmdb_user_rec_t* user) + + List slurmdb_users_remove(void* db_conn, slurmdb_user_cond_t* user_cond) + + int slurmdb_wckeys_add(void* db_conn, List wckey_list) + + List slurmdb_wckeys_get(void* db_conn, slurmdb_wckey_cond_t* wckey_cond) + + List slurmdb_wckeys_modify(void* db_conn, slurmdb_wckey_cond_t* wckey_cond, slurmdb_wckey_rec_t* wckey) + + List slurmdb_wckeys_remove(void* db_conn, slurmdb_wckey_cond_t* wckey_cond) diff --git a/scripts/pyslurm_bindgen.py b/scripts/pyslurm_bindgen.py index 3d952cf9..c4bbd18b 100755 --- a/scripts/pyslurm_bindgen.py +++ b/scripts/pyslurm_bindgen.py @@ -1,7 +1,28 @@ #!/usr/bin/env python3 +######################################################################### +# pyslurm_bindgen.py - generate cython compatible bindings for Slurm +######################################################################### +# Copyright (C) 2022 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. import autopxd import click +from datetime import datetime import os import re import pathlib @@ -31,6 +52,19 @@ def get_data_type(val): raise ValueError("Cannot get data type for value: {}".format(val)) +def capture_copyright(hdr_file): + out = [] + for line in hdr_file: + if line.startswith("/"): + line = line.replace("/", "#").replace("\\", "") + line = line.replace("*", "#").lstrip() + out.append(line) + if "CODE-OCEC" in line: + break + + return "".join(out) + + def try_get_macro_value(s): if s.startswith("SLURM_BIT"): val = int(s[s.find("(")+1:s.find(")")]) @@ -51,11 +85,22 @@ def try_get_macro_value(s): return None +def write_to_file(content, hdr): + c = click.get_current_context() + output_dir = c.params["output_dir"] + + output_file = os.path.join(output_dir, hdr + ".pxi") + with open(output_file, "w") as ofile: + ofile.write(content) + + def translate_slurm_header(hdr_dir, hdr): hdr_path = os.path.join(hdr_dir, hdr) with open(hdr_path) as f: - translate_hdr_macros(f.readlines(), hdr) + lines = f.readlines() + copyright_notice = capture_copyright(lines) + macros = "".join(translate_hdr_macros(lines, hdr)) c = click.get_current_context() if c.params["show_unparsed_macros"] or c.params["generate_python_const"]: @@ -70,7 +115,56 @@ def translate_slurm_header(hdr_dir, hdr): ) ) - print(str(codegen)) + disclaimer = f"""\ +############################################################################## +# NOTICE: This File has been generated by scripts/pyslurm_bindgen.py, which +# uses the autopxd2 tool in order to generate Cython compatible definitions +# from the {hdr} C-Header file. Basically, this can be seen as a modified +# version of the original header, with the following changes: +# +# * have the correct cython syntax for type definitions, e.g. 
"typedef struct +# " is converted to "ctypedef struct " +# * C-Macros are listed with their appropriate uint type +# * Any definitions that cannot be translated are not included in this file +# +# Generated on {datetime.now().isoformat()} +# +# The Original Copyright notice from {hdr} has been included +# below: +# +{copyright_notice}# +# Please also check the DISCLAIMER file in the Slurm repository here: +# https://github.com/SchedMD/slurm/blob/master/DISCLAIMER +############################################################################## +""" + + pyslurm_copyright = """# +# Copyright (C) 2023 The PySlurm Authors (Modifications as described above) +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +""" + c = click.get_current_context() + code = disclaimer + pyslurm_copyright + macros + "\n" + str(codegen) + code = code.replace("cpdef", "cdef") + if c.params["stdout"]: + print(code) + else: + write_to_file(code, hdr) def handle_special_cases(name, hdr): @@ -122,17 +216,19 @@ def translate_hdr_macros(s, hdr): print("") return + out = [] if vals: if c.params["generate_python_const"]: for name, ty in vals.items(): print("{} = slurm.{}".format(name, name)) else: - print("cdef extern from \"{}\":".format("slurm/" + hdr)) - print("") + hdr_file = "slurm/" + hdr + out.append(f"cdef extern from \"{hdr_file}\":\n") + out.append("\n") for name, ty in vals.items(): - print(" {} {}".format(ty, name)) - print("") + out.append(f" {ty} {name}\n") + return out def setup_include_path(hdr_dir): include_dir = pathlib.Path(hdr_dir).parent.as_posix() @@ -164,7 +260,22 @@ def setup_include_path(hdr_dir): is_flag=True, help="Generate variables acting as constants from Slurm macros.", ) -def main(slurm_header_dir, show_unparsed_macros, generate_python_const): +@click.option( + "--output-dir", + "-o", + metavar="", + default="pyslurm/slurm", + help="Output Directory for the files", +) +@click.option( + "--stdout", + "-s", + default=False, + is_flag=True, + help="Instead of writing everything to files, just print to stdout.", +) +def main(slurm_header_dir, show_unparsed_macros, + generate_python_const, output_dir, stdout): setup_include_path(slurm_header_dir) translate_slurm_header(slurm_header_dir, "slurm_errno.h") translate_slurm_header(slurm_header_dir, "slurm.h") From 00430804ed4b3be7c8d0a6d30cc10962a0faa789 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Mon, 1 May 2023 14:16:08 +0200 Subject: [PATCH 16/48] Big rework (#283) * Rework the Job-API * Rework the Node-API * Add valgrind suppression file * Rework slurmdbd Job API * rework tests directory structure, split into "unit" and "integration" --- .gitignore | 4 + pyslurm/__init__.py | 45 
+ pyslurm/api.pxd | 26 + pyslurm/api.pyx | 43 + pyslurm/core/__init__.pxd | 0 pyslurm/core/__init__.py | 0 pyslurm/core/common/__init__.pxd | 32 + pyslurm/core/common/__init__.pyx | 349 ++++++ pyslurm/core/common/cstr.pxd | 39 + pyslurm/core/common/cstr.pyx | 287 +++++ pyslurm/core/common/ctime.pxd | 32 + pyslurm/core/common/ctime.pyx | 213 ++++ pyslurm/core/common/uint.pxd | 43 + pyslurm/core/common/uint.pyx | 181 +++ pyslurm/core/db/__init__.pxd | 0 pyslurm/core/db/__init__.py | 37 + pyslurm/core/db/connection.pxd | 43 + pyslurm/core/db/connection.pyx | 79 ++ pyslurm/core/db/job.pxd | 279 +++++ pyslurm/core/db/job.pyx | 598 ++++++++++ pyslurm/core/db/qos.pxd | 65 ++ pyslurm/core/db/qos.pyx | 194 ++++ pyslurm/core/db/stats.pxd | 143 +++ pyslurm/core/db/stats.pyx | 207 ++++ pyslurm/core/db/step.pxd | 100 ++ pyslurm/core/db/step.pyx | 177 +++ pyslurm/core/db/tres.pxd | 45 + pyslurm/core/db/tres.pyx | 112 ++ pyslurm/core/db/util.pxd | 65 ++ pyslurm/core/db/util.pyx | 188 ++++ pyslurm/core/error.pyx | 100 ++ pyslurm/core/job/__init__.pxd | 0 pyslurm/core/job/__init__.py | 3 + pyslurm/core/job/job.pxd | 387 +++++++ pyslurm/core/job/job.pyx | 1346 +++++++++++++++++++++++ pyslurm/core/job/sbatch_opts.pyx | 204 ++++ pyslurm/core/job/step.pxd | 139 +++ pyslurm/core/job/step.pyx | 463 ++++++++ pyslurm/core/job/submission.pxd | 619 +++++++++++ pyslurm/core/job/submission.pyx | 682 ++++++++++++ pyslurm/core/job/task_dist.pxd | 41 + pyslurm/core/job/task_dist.pyx | 352 ++++++ pyslurm/core/job/util.pyx | 345 ++++++ pyslurm/core/node.pxd | 222 ++++ pyslurm/core/node.pyx | 719 ++++++++++++ pyslurm/core/slurmctld.pxd | 38 + pyslurm/core/slurmctld.pyx | 48 + pyslurm/pyslurm.pyx | 23 - pyslurm/slurm/SLURM_DISCLAIMER | 159 +++ pyslurm/slurm/SLURM_LICENSE | 389 +++++++ pyslurm/slurm/__init__.pxd | 1 - pyslurm/slurm/extra.pxi | 104 +- pyslurm/slurm/xmalloc.h | 117 ++ setup.cfg | 6 + setup.py | 1 - tests/integration/conftest.py | 44 + tests/integration/test_db_connection.py | 56 + 
tests/integration/test_db_job.py | 100 ++ tests/integration/test_db_qos.py | 55 + tests/integration/test_job.py | 162 +++ tests/integration/test_job_steps.py | 180 +++ tests/integration/test_job_submit.py | 43 + tests/integration/test_node.py | 72 ++ tests/integration/util.py | 65 ++ tests/unit/test_common.py | 395 +++++++ tests/unit/test_db_job.py | 52 + tests/unit/test_db_qos.py | 49 + tests/unit/test_db_slurm_list.py | 134 +++ tests/unit/test_job.py | 74 ++ tests/unit/test_job_steps.py | 44 + tests/unit/test_job_submit.py | 306 ++++++ tests/unit/test_node.py | 44 + tests/unit/test_task_dist.py | 52 + tests/unit/util.py | 56 + valgrind-pyslurm.supp | 544 +++++++++ 75 files changed, 12620 insertions(+), 41 deletions(-) create mode 100644 pyslurm/api.pxd create mode 100644 pyslurm/api.pyx create mode 100644 pyslurm/core/__init__.pxd create mode 100644 pyslurm/core/__init__.py create mode 100644 pyslurm/core/common/__init__.pxd create mode 100644 pyslurm/core/common/__init__.pyx create mode 100644 pyslurm/core/common/cstr.pxd create mode 100644 pyslurm/core/common/cstr.pyx create mode 100644 pyslurm/core/common/ctime.pxd create mode 100644 pyslurm/core/common/ctime.pyx create mode 100644 pyslurm/core/common/uint.pxd create mode 100644 pyslurm/core/common/uint.pyx create mode 100644 pyslurm/core/db/__init__.pxd create mode 100644 pyslurm/core/db/__init__.py create mode 100644 pyslurm/core/db/connection.pxd create mode 100644 pyslurm/core/db/connection.pyx create mode 100644 pyslurm/core/db/job.pxd create mode 100644 pyslurm/core/db/job.pyx create mode 100644 pyslurm/core/db/qos.pxd create mode 100644 pyslurm/core/db/qos.pyx create mode 100644 pyslurm/core/db/stats.pxd create mode 100644 pyslurm/core/db/stats.pyx create mode 100644 pyslurm/core/db/step.pxd create mode 100644 pyslurm/core/db/step.pyx create mode 100644 pyslurm/core/db/tres.pxd create mode 100644 pyslurm/core/db/tres.pyx create mode 100644 pyslurm/core/db/util.pxd create mode 100644 
pyslurm/core/db/util.pyx create mode 100644 pyslurm/core/error.pyx create mode 100644 pyslurm/core/job/__init__.pxd create mode 100644 pyslurm/core/job/__init__.py create mode 100644 pyslurm/core/job/job.pxd create mode 100644 pyslurm/core/job/job.pyx create mode 100644 pyslurm/core/job/sbatch_opts.pyx create mode 100644 pyslurm/core/job/step.pxd create mode 100644 pyslurm/core/job/step.pyx create mode 100644 pyslurm/core/job/submission.pxd create mode 100644 pyslurm/core/job/submission.pyx create mode 100644 pyslurm/core/job/task_dist.pxd create mode 100644 pyslurm/core/job/task_dist.pyx create mode 100644 pyslurm/core/job/util.pyx create mode 100644 pyslurm/core/node.pxd create mode 100644 pyslurm/core/node.pyx create mode 100644 pyslurm/core/slurmctld.pxd create mode 100644 pyslurm/core/slurmctld.pyx create mode 100644 pyslurm/slurm/SLURM_DISCLAIMER create mode 100644 pyslurm/slurm/SLURM_LICENSE create mode 100644 pyslurm/slurm/xmalloc.h create mode 100644 tests/integration/conftest.py create mode 100644 tests/integration/test_db_connection.py create mode 100644 tests/integration/test_db_job.py create mode 100644 tests/integration/test_db_qos.py create mode 100644 tests/integration/test_job.py create mode 100644 tests/integration/test_job_steps.py create mode 100644 tests/integration/test_job_submit.py create mode 100644 tests/integration/test_node.py create mode 100644 tests/integration/util.py create mode 100644 tests/unit/test_common.py create mode 100644 tests/unit/test_db_job.py create mode 100644 tests/unit/test_db_qos.py create mode 100644 tests/unit/test_db_slurm_list.py create mode 100644 tests/unit/test_job.py create mode 100644 tests/unit/test_job_steps.py create mode 100644 tests/unit/test_job_submit.py create mode 100644 tests/unit/test_node.py create mode 100644 tests/unit/test_task_dist.py create mode 100644 tests/unit/util.py create mode 100644 valgrind-pyslurm.supp diff --git a/.gitignore b/.gitignore index f79b3369..ef44eef6 100644 --- 
a/.gitignore +++ b/.gitignore @@ -16,6 +16,9 @@ pyslurm/*.pxi~ pyslurm/*.pxd~ pyslurm/*.so pyslurm/*.c +pyslurm/**/*.c +pyslurm/**/*.so +pyslurm/**/__pycache__ # Ignore vim swap files *.swp @@ -25,6 +28,7 @@ tests/*.pyc # Ignore pycache (Python 3) */__pycache__ +*/**/__pycache__ # Ignore job output files *.out diff --git a/pyslurm/__init__.py b/pyslurm/__init__.py index 177bf7cb..aa9e26c6 100644 --- a/pyslurm/__init__.py +++ b/pyslurm/__init__.py @@ -16,6 +16,51 @@ from .pyslurm import * from .__version__ import __version__ +from pyslurm.core.job import ( + Job, + Jobs, + JobStep, + JobSteps, + JobSubmitDescription, +) + +from pyslurm.core import db +from pyslurm.core.node import Node, Nodes + +import pyslurm.core.error +from pyslurm.core.error import ( + RPCError, +) + +# Utility time functions +from pyslurm.core.common.ctime import ( + timestr_to_secs, + timestr_to_mins, + secs_to_timestr, + mins_to_timestr, + date_to_timestamp, + timestamp_to_date, +) + +# General utility functions +from pyslurm.core.common import ( + uid_to_name, + gid_to_name, + user_to_uid, + group_to_gid, + expand_range_str, + humanize, + dehumanize, + nodelist_from_range_str, + nodelist_to_range_str, +) + +from pyslurm.core import slurmctld + +# Initialize slurm api +from pyslurm.api import slurm_init, slurm_fini +slurm_init() + def version(): return __version__ diff --git a/pyslurm/api.pxd b/pyslurm/api.pxd new file mode 100644 index 00000000..9b19ec9a --- /dev/null +++ b/pyslurm/api.pxd @@ -0,0 +1,26 @@ +######################################################################### +# api.pxd - pyslurm core API +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later 
version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm cimport slurm +from pyslurm.core.common cimport cstr diff --git a/pyslurm/api.pyx b/pyslurm/api.pyx new file mode 100644 index 00000000..0f34fedb --- /dev/null +++ b/pyslurm/api.pyx @@ -0,0 +1,43 @@ +######################################################################### +# api.pyx - pyslurm core API +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + + +def slurm_init(config_path=None): + """Initialize the Slurm API. + + This function must be called first before certain RPC functions can be + executed. 
slurm_init is automatically called when the pyslurm module is + loaded. + + Args: + config_path (str, optional): + An absolute path to the slurm config file to use. The default is + None, so libslurm will automatically detect its config. + """ + slurm.slurm_init(cstr.from_unicode(config_path)) + + +def slurm_fini(): + """Clean up data structures previously allocated through slurm_init.""" + slurm.slurm_fini() diff --git a/pyslurm/core/__init__.pxd b/pyslurm/core/__init__.pxd new file mode 100644 index 00000000..e69de29b diff --git a/pyslurm/core/__init__.py b/pyslurm/core/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pyslurm/core/common/__init__.pxd b/pyslurm/core/common/__init__.pxd new file mode 100644 index 00000000..7915de2f --- /dev/null +++ b/pyslurm/core/common/__init__.pxd @@ -0,0 +1,32 @@ +######################################################################### +# common/__init__.pxd - common/utility functions +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm cimport slurm +from pyslurm.slurm cimport xfree, try_xmalloc, xmalloc +from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t +from pyslurm.core.common cimport cstr +from libc.stdlib cimport free + +cpdef uid_to_name(uint32_t uid, err_on_invalid=*, dict lookup=*) +cpdef gid_to_name(uint32_t gid, err_on_invalid=*, dict lookup=*) diff --git a/pyslurm/core/common/__init__.pyx b/pyslurm/core/common/__init__.pyx new file mode 100644 index 00000000..6ad5ae47 --- /dev/null +++ b/pyslurm/core/common/__init__.pyx @@ -0,0 +1,349 @@ +######################################################################### +# common/__init__.pyx - common/utility functions +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from grp import getgrgid, getgrnam, getgrall +from pwd import getpwuid, getpwnam, getpwall +from os import getuid, getgid +from itertools import chain +import re +import signal + + +MEMORY_UNITS = { + "K": 2**10.0, + "M": 2**20.0, + "G": 2**30.0, + "T": 2**40.0, + "P": 2**50.0, + "E": 2**60.0, + "Z": 2**70.0 +} + + +cpdef uid_to_name(uint32_t uid, err_on_invalid=True, dict lookup={}): + """Translate UID to a User-Name.""" + if uid == slurm.NO_VAL or uid == slurm.INFINITE: + return None + + if lookup: + try: + name = lookup[uid] + return name + except KeyError as e: + if err_on_invalid: + raise e + else: + try: + name = getpwuid(uid).pw_name + return name + except KeyError as e: + if err_on_invalid: + raise e + + return None + + +cpdef gid_to_name(uint32_t gid, err_on_invalid=True, dict lookup={}): + """Translate a uid to a Group-Name.""" + if gid == slurm.NO_VAL or gid == slurm.INFINITE: + return None + + if lookup: + try: + name = lookup[gid] + return name + except KeyError as e: + if err_on_invalid: + raise e + else: + try: + name = getgrgid(gid).gr_name + return name + except KeyError as e: + if err_on_invalid: + raise e + + return None + + +def user_to_uid(user, err_on_invalid=True): + """Translate User-Name to a uid.""" + if user is None: + return slurm.NO_VAL + + try: + if isinstance(user, str): + return getpwnam(user).pw_uid + + return getpwuid(user).pw_uid + except KeyError as e: + if err_on_invalid: + raise e + + return getuid() + + +def group_to_gid(group, err_on_invalid=True): + """Translate a Group-Name to a gid.""" + if group is None: + return slurm.NO_VAL + + try: + if isinstance(group, str): + return getgrnam(group).gr_gid + + return getgrgid(group).gr_gid + except KeyError as e: + if err_on_invalid: + raise e + + return getgid() + + +def _getgrall_to_dict(): + cdef list groups = getgrall() + cdef dict grp_info = {item.gr_gid: item.gr_name for item in 
groups}
+    return grp_info
+
+
+def _getpwall_to_dict():
+    cdef list passwd = getpwall()
+    cdef dict pw_info = {item.pw_uid: item.pw_name for item in passwd}
+    return pw_info
+
+
+def expand_range_str(range_str):
+    """Expand a ranged string of numbers to a list of unique values.
+
+    Args:
+        range_str (str):
+            A range string, which can for example look like this:
+            "1,2,3-10,11,15-20"
+
+    Returns:
+        list: List of unique values
+    """
+    ret = []
+    for mrange in range_str.split(","):
+        start, sep, end = mrange.partition("-")
+        start = int(start)
+
+        if sep:
+            ret += range(start, int(end)+1)
+        else:
+            ret.append(start)
+
+    return ret
+
+
+def nodelist_from_range_str(nodelist):
+    """Convert a bracketed nodelist str with ranges to a list.
+
+    Args:
+        nodelist (Union[str, list]):
+            Comma-separated str or list with potentially bracketed hostnames
+            and ranges.
+
+    Returns:
+        list: List of all nodenames or None on failure
+    """
+    if isinstance(nodelist, list):
+        nodelist = ",".join(nodelist)
+
+    cdef:
+        char *nl = nodelist
+        slurm.hostlist_t hl
+        char *hl_unranged = NULL
+
+    hl = slurm.slurm_hostlist_create(nl)
+    if not hl:
+        return []
+
+    hl_unranged = slurm.slurm_hostlist_deranged_string_malloc(hl)
+    out = cstr.to_list(hl_unranged)
+
+    free(hl_unranged)
+    slurm.slurm_hostlist_destroy(hl)
+
+    return out
+
+
+def nodelist_to_range_str(nodelist):
+    """Convert a list of nodes to a bracketed str with ranges.
+
+    Args:
+        nodelist (Union[str, list]):
+            Comma-separated str or list with unique, unbracketed nodenames.
+
+    Returns:
+        str: Bracketed, ranged nodelist or None on failure.
+ """ + if isinstance(nodelist, list): + nodelist = ",".join(nodelist) + + cdef: + char *nl = nodelist + slurm.hostlist_t hl + char *hl_ranged = NULL + + hl = slurm.slurm_hostlist_create(nl) + if not hl: + return None + + hl_ranged = slurm.slurm_hostlist_ranged_string_malloc(hl) + out = cstr.to_unicode(hl_ranged) + + free(hl_ranged) + slurm.slurm_hostlist_destroy(hl) + + return out + + +def humanize(num, decimals=1): + """Humanize a number. + + This will convert the number to a string and add appropriate suffixes like + M,G,T,P,... + + Args: + num (int): + Number to humanize + decimals (int, optional): + Amount of decimals the humanized string should have. + + Returns: + str: Humanized number with appropriate suffix. + """ + if num is None or num == "unlimited": + return num + + num = int(num) + for unit in ["M", "G", "T", "P", "E", "Z"]: + if abs(num) < 1024.0: + return f"{num:3.{decimals}f}{unit}" + num /= 1024.0 + + return f"{num:.{decimals}f}Y" + + +def dehumanize(humanized_str, target="M", decimals=0): + """Dehumanize a previously humanized value. + + Args: + humanized_str (str): + A humanized str, for example "5M" or "10T" + target (str): + Target unit. The default is "M" (Mebibytes). Allowed values are + K,M,G,T,P,E,Z + decimals (int): + Amount of decimal places the result should have. 
Default is 0 + + Returns: + int: Dehumanized value + """ + if not humanized_str: + return None + + units_str = " ".join(MEMORY_UNITS.keys()) + splitted = re.split(f'([{units_str}])', str(humanized_str)) + + if len(splitted) == 1: + try: + return int(humanized_str) + except ValueError as e: + raise ValueError(f"Invalid value specified: {humanized_str}") + + val = float(splitted[0]) + unit = splitted[1] + + val_in_bytes = val * MEMORY_UNITS[unit] + val_in_target_size = float(val_in_bytes / MEMORY_UNITS[target]) + + if not decimals: + return round(val_in_target_size) + else: + return float(f"{val_in_target_size:.{decimals}f}") + + +def signal_to_num(sig): + if not sig: + return None + + try: + if str(sig).isnumeric(): + _sig = signal.Signals(int(sig)).value + else: + _sig = signal.Signals[sig].value + except Exception: + raise ValueError(f"Invalid Signal: {sig}.") from None + + return _sig + + +def cpubind_to_num(cpu_bind): + cdef uint32_t flags = 0 + + if not cpu_bind: + return flags + + cpu_bind = cpu_bind.casefold().split(",") + + if "none" in cpu_bind: + flags |= slurm.CPU_BIND_NONE + elif "sockets" in cpu_bind: + flags |= slurm.CPU_BIND_TO_SOCKETS + elif "ldoms" in cpu_bind: + flags |= slurm.CPU_BIND_TO_LDOMS + elif "cores" in cpu_bind: + flags |= slurm.CPU_BIND_TO_CORES + elif "threads" in cpu_bind: + flags |= slurm.CPU_BIND_TO_THREADS + elif "off" in cpu_bind: + flags |= slurm.CPU_BIND_OFF + if "verbose" in cpu_bind: + flags |= slurm.CPU_BIND_VERBOSE + + return flags + + +def instance_to_dict(inst): + cdef dict out = {} + for attr in dir(inst): + val = getattr(inst, attr) + if attr.startswith("_") or callable(val): + # Ignore everything starting with "_" and all functions. 
+ continue + out[attr] = val + + return out + + +def _sum_prop(obj, name, startval=0): + val = startval + for n in obj.values(): + v = name.__get__(n) + if v is not None: + val += v + + return val diff --git a/pyslurm/core/common/cstr.pxd b/pyslurm/core/common/cstr.pxd new file mode 100644 index 00000000..b1719bde --- /dev/null +++ b/pyslurm/core/common/cstr.pxd @@ -0,0 +1,39 @@ +######################################################################### +# common/cstr.pxd - slurm string functions +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm cimport slurm +from pyslurm.slurm cimport xfree, try_xmalloc, xmalloc +from libc.string cimport memcpy, strlen + +cdef char *from_unicode(s) +cdef to_unicode(char *s, default=*) +cdef fmalloc(char **old, val) +cdef fmalloc2(char **p1, char **p2, val) +cdef free_array(char **arr, count) +cpdef list to_list(char *str_list) +cdef from_list(char **old, vals, delim=*) +cdef from_list2(char **p1, char **p2, vals, delim=*) +cpdef dict to_dict(char *str_dict, str delim1=*, str delim2=*) +cdef from_dict(char **old, vals, prepend=*, str delim1=*, str delim2=*) +cpdef dict to_gres_dict(char *gres) diff --git a/pyslurm/core/common/cstr.pyx b/pyslurm/core/common/cstr.pyx new file mode 100644 index 00000000..8301c994 --- /dev/null +++ b/pyslurm/core/common/cstr.pyx @@ -0,0 +1,287 @@ +######################################################################### +# common/cstr.pyx - pyslurm string functions +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +import re + +cdef bytes NULL_BYTE = "\0".encode("ascii") +cdef bytes NONE_BYTE = "None".encode("ascii") + +cdef char *from_unicode(s): + """Convert Python3 str (unicode) to char* (no malloc) + + Note + The lifetime of this char* depends on the lifetime of the equivalent + python-object passed in. If the python-object is gone, the char* cannot + be used safely anymore. + """ + if not s: + return NULL + + _s = str(s) + return _s + + +cdef to_unicode(char *_str, default=None): + """Convert a char* to Python3 str (unicode)""" + if _str and _str[0] != NULL_BYTE: + if _str == NONE_BYTE: + return None + + return _str + else: + return default + + +cdef fmalloc2(char **p1, char **p2, val): + """Like fmalloc, but copies the value to 2 char pointers.""" + fmalloc(p1, val) + fmalloc(p2, val) + + +cdef fmalloc(char **old, val): + """Try to free first and then create xmalloc'ed char* from str. + + Note: Uses Slurm's memory allocator. + """ + # TODO: Consider doing some size checks on the input by having an extra + # argument like "max_size" which is configurable. Otherwise infinitely huge + # strings could just be passed in and consume a lot of memory which would + # allow for a denial of service attack on services that use pyslurm. + cdef: + const char *tmp = NULL + size_t siz + + # Free the previous allocation (if neccessary) + xfree(old[0]) + + # Consider: Maybe every string containing a \0 should just + # be rejected with an Exception instead of silently cutting + # everything after \0 off? + + if val and val[0] != "\0": + # Let Cython convert the Python-string to a char* + # which will be NUL-terminated. 
+ tmp = val + + # Get the length of the char*, include space for NUL character + siz = strlen(tmp) + 1 + + old[0] = slurm.try_xmalloc(siz) + if not old[0]: + raise MemoryError("xmalloc failed for char*") + + memcpy(old[0], tmp, siz) + else: + old[0] = NULL + + +cpdef list to_list(char *str_list): + """Convert C-String to a list.""" + cdef str ret = to_unicode(str_list) + + if not ret: + return [] + + return ret.split(",") + + +def list_to_str(vals, delim=","): + """Convert list to a C-String.""" + cdef object final = vals + + if vals and not isinstance(vals, str): + final = delim.join(vals) + + return final + + +cdef from_list(char **old, vals, delim=","): + fmalloc(old, list_to_str(vals, delim)) + + +cdef from_list2(char **p1, char **p2, vals, delim=","): + from_list(p1, vals, delim) + from_list(p2, vals, delim) + + +cpdef dict to_dict(char *str_dict, str delim1=",", str delim2="="): + """Convert a char* key=value pair to dict. + + With a char* Slurm represents key-values pairs usually in the form of: + key1=value1,key2=value2 + which can easily be converted to a dict. + """ + cdef: + str _str_dict = to_unicode(str_dict) + str key, val + dict out = {} + + if not _str_dict or delim1 not in _str_dict: + return out + + for kv in _str_dict.split(delim1): + if delim2 in kv: + key, val = kv.split(delim2, 1) + out[key] = val + + return out + + +def validate_str_key_value_format(val, delim1=",", delim2="="): + cdef dict out = {} + + for kv in val.split(delim1): + if delim2 in kv: + k, v = kv.split(delim2) + out[k] = v + else: + raise ValueError( + f"Invalid format for key-value pair {kv}. " + f"Expected {delim2} as seperator." + ) + + return out + + +def dict_to_str(vals, prepend=None, delim1=",", delim2="="): + """Convert a dict (or str) to Slurm Key-Value pair. + + Slurm predominantly uses a format of: + key1=value1,key2=value2,... + + for Key/Value type things, which can be easily created from a dict. + + A String which already has this form can also be passed in. 
+ The correct
+    format of this string will then be validated.
+    """
+    cdef:
+        tmp_dict = {} if not vals else vals
+        list tmp = []
+
+    if not vals:
+        return None
+
+    if isinstance(vals, str):
+        tmp_dict = validate_str_key_value_format(vals, delim1, delim2)
+
+    for k, v in tmp_dict.items():
+        if ((delim1 in k or delim2 in k) or
+                delim1 in v or delim2 in v):
+            raise ValueError(
+                f"Key or Value cannot contain either {delim1} or {delim2}. "
+                f"Got Key: {k} and Value: {v}."
+            )
+
+        tmp.append(f"{'' if not prepend else prepend}{k}{delim2}{v}")
+
+    return delim1.join(tmp)
+
+
+cdef from_dict(char **old, vals, prepend=None,
+               str delim1=",", str delim2="="):
+    fmalloc(old, dict_to_str(vals, prepend, delim1, delim2))
+
+
+cpdef dict to_gres_dict(char *gres):
+    """Parse a GRES string."""
+    cdef:
+        dict output = {}
+        str gres_str = to_unicode(gres)
+
+    if not gres_str or gres_str == "(null)":
+        return {}
+
+    for item in re.split(",(?=[^,]+?:)", gres_str):
+
+        # Remove the additional "gres" specifier if it exists
+        if "gres:" in item:
+            item = item.replace("gres:", "")
+
+        gres_splitted = re.split(
+            ":(?=[^:]+?)",
+            item.replace("(", ":", 1).replace(")", "")
+        )
+
+        name, typ, cnt = gres_splitted[0], gres_splitted[1], 0
+
+        # Check if we have a gres type.
+        if typ.isdigit():
+            cnt = typ
+            typ = None
+        else:
+            cnt = gres_splitted[2]
+
+        # Dict Key-Name depends on if we have a gres type or not
+        name_and_typ = f"{name}:{typ}" if typ else name
+
+        if not "IDX" in gres_splitted:
+            # Check if we need to parse the exact GRES index when coming from
+            # job_resources_t.
+ output[name_and_typ] = int(cnt) + else: + # Cover cases with IDX + idx = gres_splitted[3] if not typ else gres_splitted[4] + output[name_and_typ] = { + "count": cnt, + "indexes": idx, + } + + return output + + +def from_gres_dict(vals, typ=""): + final = [] + gres_dict = {} if not vals else vals + + if not vals: + return None + + if isinstance(vals, str) and not vals.isdigit(): + gres_dict = {} + gres_list = vals.replace("gres:", "") + for gres_str in gres_list.split(","): + gres_and_type, cnt = gres_str.rsplit(":", 1) + gres_dict.update({gres_and_type: int(cnt)}) + elif not isinstance(vals, dict): + return f"gres:{typ}:{int(vals)}" + + for gres_and_type, cnt in gres_dict.items(): + # Error immediately on specifications that contain more than one + # semicolon, as it is wrong. + if len(gres_and_type.split(":")) > 2: + raise ValueError(f"Invalid specifier: '{gres_and_type}'") + + if typ not in gres_and_type: + gres_and_type = f"{gres_and_type}:{typ}" + + final.append(f"gres:{gres_and_type}:{int(cnt)}") + + return ",".join(final) + + +cdef free_array(char **arr, count): + for i in range(count): + xfree(arr[i]) + + xfree(arr) diff --git a/pyslurm/core/common/ctime.pxd b/pyslurm/core/common/ctime.pxd new file mode 100644 index 00000000..d8abb12d --- /dev/null +++ b/pyslurm/core/common/ctime.pxd @@ -0,0 +1,32 @@ +######################################################################### +# ctime.pxd - wrappers around slurm time functions +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+ +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm cimport slurm +from pyslurm.core.common cimport cstr +from libc.stdint cimport uint32_t + +cdef extern from 'time.h' nogil: + ctypedef long time_t + double difftime(time_t time1, time_t time2) + time_t time(time_t *t) diff --git a/pyslurm/core/common/ctime.pyx b/pyslurm/core/common/ctime.pyx new file mode 100644 index 00000000..fdf68834 --- /dev/null +++ b/pyslurm/core/common/ctime.pyx @@ -0,0 +1,213 @@ +######################################################################### +# ctime.pyx - wrappers around slurm time functions +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +import datetime + + +def timestr_to_secs(timestr): + """Convert Slurm Timestring to seconds + + Args: + timestr (str): + A Timestring compatible with Slurms time functions. + + Returns: + int: Amount of time in seconds + """ + cdef: + char *tmp = NULL + uint32_t secs + + if timestr is None: + return slurm.NO_VAL + elif timestr == "unlimited": + return slurm.INFINITE + + if str(timestr).isdigit(): + timestr = "00:00:{}".format(timestr) + + tmp = cstr.from_unicode(timestr) + secs = slurm.slurm_time_str2secs(tmp) + + if secs == slurm.NO_VAL: + raise ValueError(f"Invalid Time Specification: {timestr}.") + + return secs + + +def timestr_to_mins(timestr): + """Convert Slurm Timestring to minutes + + Args: + timestr (str): + A Timestring compatible with Slurms time functions. + + Returns: + int: Amount of time in minutes + """ + cdef: + char *tmp = NULL + uint32_t mins + + if timestr is None: + return slurm.NO_VAL + elif timestr == "unlimited": + return slurm.INFINITE + + tmp = cstr.from_unicode(timestr) + mins = slurm.slurm_time_str2mins(tmp) + + if mins == slurm.NO_VAL: + raise ValueError(f"Invalid Time Specification: {timestr}.") + + return mins + + +def secs_to_timestr(secs, default=None): + """Parse time in seconds to Slurm Timestring + + Args: + secs (int): + Amount of seconds to convert + + Returns: + str: A Slurm timestring + """ + cdef char time_line[32] + + if secs == slurm.NO_VAL or secs is None: + return default + elif secs != slurm.INFINITE: + slurm.slurm_secs2time_str( + secs, + time_line, + sizeof(time_line) + ) + + tmp = cstr.to_unicode(time_line) + if tmp == "00:00:00": + return None + else: + return tmp + else: + return "unlimited" + + +def mins_to_timestr(mins, default=None): + """Parse time in minutes to Slurm Timestring + + Args: + mins (int): + Amount of minutes to convert + + Returns: + str: A Slurm timestring + """ + cdef char time_line[32] + + if mins == 
slurm.NO_VAL or mins is None: + return default + elif mins != slurm.INFINITE: + slurm.slurm_mins2time_str( + mins, + time_line, + sizeof(time_line) + ) + + tmp = cstr.to_unicode(time_line) + if tmp == "00:00:00": + return None + else: + return tmp + else: + return "unlimited" + + +def date_to_timestamp(date, on_nodate=0): + """Parse Date to Unix timestamp + + Args: + date (Union[str, int, datetime.datetime]): + A date to convert to a Unix timestamp. + + Returns: + int: A unix timestamp + """ + cdef: + time_t tmp_time + char* tmp_char = NULL + + if not date: + # time_t of 0, so the option will be ignored by slurmctld + return on_nodate + elif str(date).isdigit(): + # Allow the user to pass a timestamp directly. + return int(date) + elif isinstance(date, datetime.datetime): + # Allow the user to pass a datetime.datetime object. + return int(date.timestamp()) + + tmp_char = cstr.from_unicode(date) + tmp_time = slurm.slurm_parse_time(tmp_char, 0) + + if not tmp_time: + raise ValueError(f"Invalid Time Specification: {date}") + + return tmp_time + + +def timestamp_to_date(timestamp): + """Parse Unix timestamp to Slurm Date-string + + Args: + timestamp (int): + A Unix timestamp that should be converted. 
+ + Returns: + str: A Slurm date timestring + """ + cdef: + char time_str[32] + time_t _time = timestamp + + if _time == slurm.NO_VAL: + return None + + # slurm_make_time_str returns 'Unknown' if 0 or slurm.INFINITE + slurm.slurm_make_time_str(&_time, time_str, sizeof(time_str)) + + ret = cstr.to_unicode(time_str) + if ret == "Unknown": + return None + + return ret + + +def _raw_time(time, default=None): + if (time == slurm.NO_VAL or + time == 0 or + time == slurm.INFINITE): + return default + + return time diff --git a/pyslurm/core/common/uint.pxd b/pyslurm/core/common/uint.pxd new file mode 100644 index 00000000..0fd38739 --- /dev/null +++ b/pyslurm/core/common/uint.pxd @@ -0,0 +1,43 @@ +######################################################################### +# common/uint.pxd - functions dealing with parsing uint types +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm cimport slurm +from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t + +cpdef u8(val, inf=*, noval=*, on_noval=*, zero_is_noval=*) +cpdef u16(val, inf=*, noval=*, on_noval=*, zero_is_noval=*) +cpdef u32(val, inf=*, noval=*, on_noval=*, zero_is_noval=*) +cpdef u64(val, inf=*, noval=*, on_noval=*, zero_is_noval=*) +cpdef u8_parse(uint8_t val, on_inf=*, on_noval=*, noval=*, zero_is_noval=*) +cpdef u16_parse(uint16_t val, on_inf=*, on_noval=*, noval=*, zero_is_noval=*) +cpdef u32_parse(uint32_t val, on_inf=*, on_noval=*, noval=*, zero_is_noval=*) +cpdef u64_parse(uint64_t val, on_inf=*, on_noval=*, noval=*, zero_is_noval=*) +cpdef u8_bool(val) +cpdef u16_bool(val) +cdef u8_parse_bool(uint8_t val) +cdef u16_parse_bool(uint16_t val) +cdef u64_parse_bool_flag(uint64_t flags, flag) +cdef u64_set_bool_flag(uint64_t *flags, boolean, flag_val) +cdef u16_parse_bool_flag(uint16_t flags, flag) +cdef u16_set_bool_flag(uint16_t *flags, boolean, flag_val) diff --git a/pyslurm/core/common/uint.pyx b/pyslurm/core/common/uint.pyx new file mode 100644 index 00000000..7418e109 --- /dev/null +++ b/pyslurm/core/common/uint.pyx @@ -0,0 +1,181 @@ +######################################################################### +# common/uint.pyx - functions dealing with parsing uint types +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + + +cpdef u8(val, inf=False, noval=slurm.NO_VAL8, on_noval=slurm.NO_VAL8, zero_is_noval=True): + """Try to convert arbitrary 'val' to uint8_t""" + if val is None or (val == 0 and zero_is_noval) or val == noval: + return on_noval + elif inf and val == "unlimited": + return slurm.INFINITE8 + else: + if isinstance(val, str) and val.isdigit(): + return int(val) + + return val + + +cpdef u8_parse(uint8_t val, on_inf="unlimited", on_noval=None, noval=slurm.NO_VAL8, zero_is_noval=True): + """Convert uint8_t to Python int (with a few situational parameters)""" + if val == noval or (val == 0 and zero_is_noval): + return on_noval + elif val == slurm.INFINITE8: + return on_inf + else: + return val + + +cpdef u16(val, inf=False, noval=slurm.NO_VAL16, on_noval=slurm.NO_VAL16, zero_is_noval=True): + """Try to convert arbitrary 'val' to uint16_t""" + if val is None or (val == 0 and zero_is_noval) or val == noval: + return on_noval + elif inf and val == "unlimited": + return slurm.INFINITE16 + else: + if isinstance(val, str) and val.isdigit(): + return int(val) + + return val + + +cpdef u16_parse(uint16_t val, on_inf="unlimited", on_noval=None, noval=slurm.NO_VAL16, zero_is_noval=True): + """Convert uint16_t to Python int (with a few situational parameters)""" + if val == noval or (val == 0 and zero_is_noval): + return on_noval + elif val == slurm.INFINITE16: + return on_inf + else: + return val + + +cpdef u32(val, inf=False, noval=slurm.NO_VAL, on_noval=slurm.NO_VAL, zero_is_noval=True): + """Try to convert arbitrary 'val' to uint32_t""" + if val is None or (val == 0 and zero_is_noval) or val == noval: + return on_noval + elif 
inf and val == "unlimited": + return slurm.INFINITE + else: + if isinstance(val, str) and val.isdigit(): + return int(val) + + return val + + +cpdef u32_parse(uint32_t val, on_inf="unlimited", on_noval=None, noval=slurm.NO_VAL, zero_is_noval=True): + """Convert uint32_t to Python int (with a few situational parameters)""" + if val == noval or (val == 0 and zero_is_noval): + return on_noval + elif val == slurm.INFINITE: + return on_inf + else: + return val + + +cpdef u64(val, inf=False, noval=slurm.NO_VAL64, on_noval=slurm.NO_VAL64, zero_is_noval=True): + """Try to convert arbitrary 'val' to uint64_t""" + if val is None or (val == 0 and zero_is_noval) or val == noval: + return on_noval + elif inf and val == "unlimited": + return slurm.INFINITE64 + else: + if isinstance(val, str) and val.isdigit(): + return int(val) + + return val + + +cpdef u64_parse(uint64_t val, on_inf="unlimited", on_noval=None, noval=slurm.NO_VAL64, zero_is_noval=True): + """Convert uint64_t to Python int (with a few situational parameters)""" + if val == noval or (val == 0 and zero_is_noval): + return on_noval + elif val == slurm.INFINITE64: + return on_inf + else: + return val + + +cpdef u8_bool(val): + if val is None: + return slurm.NO_VAL8 + elif val: + return 1 + else: + return 0 + + +cpdef u16_bool(val): + if val is None: + return slurm.NO_VAL16 + elif val: + return 1 + else: + return 0 + + +cdef u8_parse_bool(uint8_t val): + if not val or val == slurm.NO_VAL8: + return False + + return True + + +cdef u16_parse_bool(uint16_t val): + if not val or val == slurm.NO_VAL16: + return False + + return True + + +cdef u64_set_bool_flag(uint64_t *flags, boolean, flag_val): + if boolean: + flags[0] |= flag_val + else: + flags[0] &= ~flag_val + + +cdef u64_parse_bool_flag(uint64_t flags, flag): + if flags == slurm.NO_VAL: + return False + + if flags & flag: + return True + else: + return False + + +cdef u16_set_bool_flag(uint16_t *flags, boolean, flag_val): + if boolean: + flags[0] |= flag_val + else: 
+ flags[0] &= ~flag_val + + +cdef u16_parse_bool_flag(uint16_t flags, flag): + if flags == slurm.NO_VAL16: + return False + + if flags & flag: + return True + else: + return False diff --git a/pyslurm/core/db/__init__.pxd b/pyslurm/core/db/__init__.pxd new file mode 100644 index 00000000..e69de29b diff --git a/pyslurm/core/db/__init__.py b/pyslurm/core/db/__init__.py new file mode 100644 index 00000000..a742f72b --- /dev/null +++ b/pyslurm/core/db/__init__.py @@ -0,0 +1,37 @@ +######################################################################### +# db/__init__.py - database package __init__ file +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +from pyslurm.core.db.connection import Connection +from pyslurm.core.db.step import JobStep +from pyslurm.core.db.job import ( + Job, + Jobs, + JobSearchFilter, +) +from pyslurm.core.db.tres import ( + TrackableResource, + TrackableResources, +) +from pyslurm.core.db.qos import ( + QualitiesOfService, + QualityOfService, + QualityOfServiceSearchFilter, +) diff --git a/pyslurm/core/db/connection.pxd b/pyslurm/core/db/connection.pxd new file mode 100644 index 00000000..6ac2dfc6 --- /dev/null +++ b/pyslurm/core/db/connection.pxd @@ -0,0 +1,43 @@ +######################################################################### +# connection.pxd - pyslurm slurmdbd database connection +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm cimport slurm +from libc.stdint cimport uint16_t +from pyslurm.slurm cimport ( + slurmdb_connection_get, + slurmdb_connection_close, + slurmdb_connection_commit, +) + + +cdef class Connection: + """A connection to the slurmdbd. + + Attributes: + is_open (bool): + Whether the connection is open or closed. 
+ """ + cdef: + void *ptr + uint16_t flags diff --git a/pyslurm/core/db/connection.pyx b/pyslurm/core/db/connection.pyx new file mode 100644 index 00000000..ff32dd92 --- /dev/null +++ b/pyslurm/core/db/connection.pyx @@ -0,0 +1,79 @@ +######################################################################### +# connection.pyx - pyslurm slurmdbd database connection +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+#
+# cython: c_string_type=unicode, c_string_encoding=default
+# cython: language_level=3
+
+from pyslurm.core.error import RPCError
+
+
+cdef class Connection:
+
+    def __cinit__(self):
+        self.ptr = NULL
+        self.flags = 0
+
+    def __init__(self):
+        raise RuntimeError("A new connection should be created through "
+                           "calling Connection.open()")
+
+    def __dealloc__(self):
+        self.close()
+
+    @staticmethod
+    def open():
+        """Open a new connection to the slurmdbd
+
+        Raises:
+            RPCError: When opening the connection fails
+
+        Returns:
+            (Connection): Connection to slurmdbd
+        """
+        cdef Connection conn = Connection.__new__(Connection)
+        conn.ptr = slurmdb_connection_get(&conn.flags)
+        if not conn.ptr:
+            raise RPCError(msg="Failed to open connection to slurmdbd")
+
+        return conn
+
+    def close(self):
+        """Close the current connection."""
+        if self.is_open:
+            slurmdb_connection_close(&self.ptr)
+            self.ptr = NULL
+
+    def commit(self):
+        """Commit recent changes."""
+        if slurmdb_connection_commit(self.ptr, 1) == slurm.SLURM_ERROR:
+            raise RPCError("Failed to commit database changes.")
+
+    def rollback(self):
+        """Rollback recent changes."""
+        if slurmdb_connection_commit(self.ptr, 0) == slurm.SLURM_ERROR:
+            raise RPCError("Failed to rollback database changes.")
+
+    @property
+    def is_open(self):
+        if self.ptr:
+            return True
+        else:
+            return False
diff --git a/pyslurm/core/db/job.pxd b/pyslurm/core/db/job.pxd
new file mode 100644
index 00000000..2b220a05
--- /dev/null
+++ b/pyslurm/core/db/job.pxd
@@ -0,0 +1,279 @@
+#########################################################################
+# job.pxd - pyslurm slurmdbd job api
+#########################################################################
+# Copyright (C) 2023 Toni Harzendorf
+#
+# This file is part of PySlurm
+#
+# PySlurm is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# PySlurm is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with PySlurm; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# cython: c_string_type=unicode, c_string_encoding=default
+# cython: language_level=3
+
+from pyslurm cimport slurm
+from pyslurm.slurm cimport (
+    slurmdb_job_rec_t,
+    slurmdb_job_cond_t,
+    slurmdb_step_rec_t,
+    slurmdb_jobs_get,
+    slurmdb_destroy_job_cond,
+    slurmdb_destroy_job_rec,
+    slurmdb_destroy_step_rec,
+    slurm_destroy_selected_step,
+    slurm_selected_step_t,
+    slurm_list_create,
+    slurm_list_append,
+    try_xmalloc,
+    slurmdb_job_cond_def_start_end,
+    slurm_job_state_string,
+    slurm_job_reason_string,
+)
+from pyslurm.core.db.util cimport (
+    SlurmList,
+    SlurmListItem,
+    make_char_list,
+)
+from pyslurm.core.db.step cimport JobStep, JobSteps
+from pyslurm.core.db.stats cimport JobStats
+from pyslurm.core.db.connection cimport Connection
+from pyslurm.core.common cimport cstr
+from pyslurm.core.db.qos cimport QualitiesOfService
+
+
+cdef class JobSearchFilter:
+    """Search conditions for Slurm database Jobs.
+
+    Args:
+        **kwargs:
+            Any valid attribute of the object.
+
+    Attributes:
+        ids (list):
+            A list of Job ids to search for.
+        start_time (Union[str, int, datetime.datetime]):
+            Search for Jobs which started after this time.
+        end_time (Union[str, int, datetime.datetime]):
+            Search for Jobs which ended before this time.
+        accounts (list):
+            Search for Jobs with these account names.
+        association_ids (list):
+            Search for Jobs with these association ids.
+        clusters (list):
+            Search for Jobs running in these clusters.
+ constraints (list): + Search for Jobs with these constraints. + cpus (int): + Search for Jobs with exactly this many CPUs. + Note: If you also specify max_cpus, then this value will act as + the minimum. + max_cpus (int): + Search for Jobs with no more than this amount of CPUs. + Note: This value has no effect without also setting cpus. + nodes (int): + Search for Jobs with exactly this many nodes. + Note: If you also specify max_nodes, then this value will act as + the minimum. + max_nodes (int): + Search for Jobs with no more than this amount of nodes. + Note: This value has no effect without also setting nodes. + qos (list): + Search for Jobs with these Qualities of Service. + names (list): + Search for Jobs with these job names. + partitions (list): + Search for Jobs with these partition names. + groups (list): + Search for Jobs with these group names. You can both specify the + groups as string or by their GID. + timelimit (Union[str, int]): + Search for Jobs with exactly this timelimit. + Note: If you also specify max_timelimit, then this value will act + as the minimum. + max_timelimit (Union[str, int]): + Search for Jobs which run no longer than this timelimit + Note: This value has no effect without also setting timelimit + users (list): + Search for Jobs with these user names. You can both specify the + users as string or by their UID. 
+        wckeys (list):
+            Search for Jobs with these WCKeys
+        nodelist (list):
+            Search for Jobs that ran on any of these Nodes
+        with_script (bool):
+            Instruct the slurmdbd to also send the job script(s)
+            Note: This requires specifying explicit job ids, and is mutually
+            exclusive with with_env
+        with_env (bool):
+            Instruct the slurmdbd to also send the job environment(s)
+            Note: This requires specifying explicit job ids, and is mutually
+            exclusive with with_script
+    """
+    cdef slurmdb_job_cond_t *ptr
+
+    cdef public:
+        ids
+        start_time
+        end_time
+        accounts
+        association_ids
+        clusters
+        constraints
+        cpus
+        max_cpus
+        nodes
+        max_nodes
+        qualities_of_service
+        names
+        partitions
+        groups
+        timelimit
+        max_timelimit
+        users
+        wckeys
+        nodelist
+        with_script
+        with_env
+
+
+cdef class Jobs(dict):
+    """A collection of Database Jobs."""
+    cdef:
+        SlurmList info
+        Connection db_conn
+
+
+cdef class Job:
+    """A Slurm Database Job.
+
+    Args:
+        job_id (int):
+            An Integer representing a Job-ID.
+
+    Raises:
+        MemoryError: If malloc fails to allocate memory.
+
+    Attributes:
+        steps (pyslurm.db.JobSteps):
+            Steps this Job has
+        stats (pyslurm.db.JobStats):
+            Utilization statistics of this Job
+        account (str):
+            Account of the Job.
+        admin_comment (str):
+            Admin comment for the Job.
+        num_nodes (int):
+            Amount of nodes this Job has allocated (if it is running) or
+            requested (if it is still pending).
+        array_id (int):
+            The master Array-Job ID.
+        array_tasks_parallel (int):
+            Max number of array tasks allowed to run simultaneously.
+        array_task_id (int):
+            Array Task ID of this Job if it is an Array-Job.
+        array_tasks_waiting (str):
+            Array Tasks that are still waiting.
+        association_id (int):
+            ID of the Association this job runs in.
+ block_id (str): + Name of the block used (for BlueGene Systems) + cluster (str): + Cluster this Job belongs to + constraints (str): + Constraints of the Job + container (str): + Path to OCI Container bundle + db_index (int): + Unique database index of the Job in the job table + derived_exit_code (int): + Highest exit code of all the Job steps + derived_exit_code_signal (int): + Signal of the derived exit code + comment (str): + Comment for the Job + elapsed_time (int): + Amount of seconds elapsed for the Job + eligible_time (int): + When the Job became eligible to run, as a unix timestamp + end_time (int): + When the Job ended, as a unix timestamp + exit_code (int): + Exit code of the job script or salloc. + exit_code_signal (int): + Signal of the exit code for this Job. + group_id (int): + ID of the group for this Job + group_name (str): + Name of the group for this Job + id (int): + ID of the Job + name (str): + Name of the Job + mcs_label (str): + MCS Label of the Job + nodelist (str): + Nodes this Job is using + partition (str): + Name of the Partition for this Job + priority (int): + Priority for the Job + quality_of_service (str): + Name of the Quality of Service for the Job + cpus (int): + Amount of CPUs the Job has/had allocated, or, if the Job is still + pending, this will reflect the amount requested. + memory (int): + Amount of memory the Job requested in total + reservation (str): + Name of the Reservation for this Job + script (str): + The batch script for this Job. 
+ Note: Only available if the "with_script" condition was given + start_time (int): + Time when the Job started, as a unix timestamp + state (str): + State of the Job + state_reason (str): + Last reason a Job was blocked from running + cancelled_by (str): + Name of the User who cancelled this Job + submit_time (int): + Time the Job was submitted, as a unix timestamp + submit_command (str): + Full command issued to submit the Job + suspended_time (int): + Amount of seconds the Job was suspended + system_comment (str): + Arbitrary System comment for the Job + time_limit (int): + Time limit of the Job in minutes + user_id (int): + UID of the User this Job belongs to + user_name (str): + Name of the User this Job belongs to + wckey (str): + Name of the WCKey for this Job + working_directory (str): + Working directory of the Job + """ + cdef: + slurmdb_job_rec_t *ptr + QualitiesOfService qos_data + + cdef public: + JobSteps steps + JobStats stats + + @staticmethod + cdef Job from_ptr(slurmdb_job_rec_t *in_ptr) diff --git a/pyslurm/core/db/job.pyx b/pyslurm/core/db/job.pyx new file mode 100644 index 00000000..d66f789e --- /dev/null +++ b/pyslurm/core/db/job.pyx @@ -0,0 +1,598 @@ +######################################################################### +# job.pyx - pyslurm slurmdbd job api +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from os import WIFSIGNALED, WIFEXITED, WTERMSIG, WEXITSTATUS +from pyslurm.core.error import RPCError +from pyslurm.core.db.tres cimport TrackableResources, TrackableResource +from pyslurm.core import slurmctld +from pyslurm.core.common.uint import * +from pyslurm.core.common.ctime import ( + date_to_timestamp, + timestr_to_mins, + _raw_time, +) +from pyslurm.core.common import ( + gid_to_name, + group_to_gid, + user_to_uid, + uid_to_name, + nodelist_to_range_str, + instance_to_dict, +) + + +cdef class JobSearchFilter: + + def __cinit__(self): + self.ptr = NULL + + def __init__(self, **kwargs): + for k, v in kwargs.items(): + setattr(self, k, v) + + def __dealloc__(self): + self._dealloc() + + def _dealloc(self): + slurmdb_destroy_job_cond(self.ptr) + self.ptr = NULL + + def _alloc(self): + self._dealloc() + self.ptr = try_xmalloc(sizeof(slurmdb_job_cond_t)) + if not self.ptr: + raise MemoryError("xmalloc failed for slurmdb_job_cond_t") + + self.ptr.db_flags = slurm.SLURMDB_JOB_FLAG_NOTSET + self.ptr.flags |= slurm.JOBCOND_FLAG_NO_TRUNC + + def _parse_qos(self): + if not self.qualities_of_service: + return None + + qos_id_list = [] + qos = QualitiesOfService.load() + for q in self.qualities_of_service: + if isinstance(q, int): + qos_id_list.append(q) + elif q in qos: + qos_id_list.append(str(qos[q].id)) + else: + raise ValueError(f"QoS {q} does not exist") + + return qos_id_list + + def _parse_groups(self): + if not self.groups: + return None + + gid_list = [] + for group in self.groups: + if isinstance(group, int): + gid_list.append(group) + else: + gid_list.append(group_to_gid(group)) + + return gid_list + + def _parse_users(self): + if not self.users: + return 
None + + uid_list = [] + for user in self.users: + if not isinstance(user, list): + uid_list.append(int(user)) + elif user: + uid_list.append(user_to_uid(user)) + + return uid_list + + def _parse_clusters(self): + if not self.clusters: + # Get the local cluster name + # This is a requirement for some other parameters to function + # correctly, like self.nodelist + slurm_conf = slurmctld.Config.load() + return [slurm_conf.cluster] + elif self.clusters == "all": + return None + else: + return self.clusters + + def _parse_state(self): + # TODO: implement + return None + + def _create(self): + self._alloc() + cdef: + slurmdb_job_cond_t *ptr = self.ptr + slurm_selected_step_t *selected_step + + ptr.usage_start = date_to_timestamp(self.start_time) + ptr.usage_end = date_to_timestamp(self.end_time) + slurmdb_job_cond_def_start_end(ptr) + ptr.cpus_min = u32(self.cpus, on_noval=0) + ptr.cpus_max = u32(self.max_cpus, on_noval=0) + ptr.nodes_min = u32(self.nodes, on_noval=0) + ptr.nodes_max = u32(self.max_nodes, on_noval=0) + ptr.timelimit_min = u32(timestr_to_mins(self.timelimit), on_noval=0) + ptr.timelimit_max = u32(timestr_to_mins(self.max_timelimit), + on_noval=0) + make_char_list(&ptr.acct_list, self.accounts) + make_char_list(&ptr.associd_list, self.association_ids) + make_char_list(&ptr.cluster_list, self._parse_clusters()) + make_char_list(&ptr.constraint_list, self.constraints) + make_char_list(&ptr.jobname_list, self.names) + make_char_list(&ptr.groupid_list, self._parse_groups()) + make_char_list(&ptr.userid_list, self._parse_users()) + make_char_list(&ptr.wckey_list, self.wckeys) + make_char_list(&ptr.partition_list, self.partitions) + make_char_list(&ptr.qos_list, self._parse_qos()) + make_char_list(&ptr.state_list, self._parse_state()) + + if self.nodelist: + cstr.fmalloc(&ptr.used_nodes, + nodelist_to_range_str(self.nodelist)) + + if self.ids: + # These are only allowed by the slurmdbd when specific jobs are + # requested. 
+ if self.with_script and self.with_env: + raise ValueError("with_script and with_env are mutually " + "exclusive") + + if self.with_script: + ptr.flags |= slurm.JOBCOND_FLAG_SCRIPT + elif self.with_env: + ptr.flags |= slurm.JOBCOND_FLAG_ENV + + ptr.step_list = slurm_list_create(slurm_destroy_selected_step) + already_added = [] + for i in self.ids: + job_id = u32(i) + if job_id in already_added: + continue + + selected_step = NULL + selected_step = try_xmalloc( + sizeof(slurm_selected_step_t)) + if not selected_step: + raise MemoryError("xmalloc failed for slurm_selected_step_t") + + selected_step.array_task_id = slurm.NO_VAL + selected_step.het_job_offset = slurm.NO_VAL + selected_step.step_id.step_id = slurm.NO_VAL + selected_step.step_id.job_id = job_id + slurm_list_append(ptr.step_list, selected_step) + already_added.append(job_id) + + +cdef class Jobs(dict): + + def __init__(self, *args, **kwargs): + # TODO: ability to initialize with existing job objects + pass + + @staticmethod + def load(search_filter=None): + """Load Jobs from the Slurm Database + + Implements the slurmdb_jobs_get RPC. + + Args: + search_filter (pyslurm.db.JobSearchFilter): + A search filter that the slurmdbd will apply when retrieving + Jobs from the database. + + Raises: + RPCError: When getting the Jobs from the Database was not + sucessful + """ + cdef: + Jobs jobs = Jobs() + Job job + JobSearchFilter cond + SlurmListItem job_ptr + QualitiesOfService qos_data + + if search_filter: + cond = search_filter + else: + cond = JobSearchFilter() + + cond._create() + jobs.db_conn = Connection.open() + jobs.info = SlurmList.wrap(slurmdb_jobs_get(jobs.db_conn.ptr, + cond.ptr)) + if jobs.info.is_null: + raise RPCError(msg="Failed to get Jobs from slurmdbd") + + qos_data = QualitiesOfService.load(name_is_key=False, + db_connection=jobs.db_conn) + + # TODO: also get trackable resources with slurmdb_tres_get and store + # it in each job instance. 
tres_alloc_str and tres_req_str only + # contain the numeric tres ids, but it probably makes more sense to + # convert them to its type name for the user in advance. + + # TODO: For multi-cluster support, remove duplicate federation jobs + # TODO: How to handle the possibility of duplicate job ids that could + # appear if IDs on a cluster are resetted? + for job_ptr in SlurmList.iter_and_pop(jobs.info): + job = Job.from_ptr(job_ptr.data) + job.qos_data = qos_data + job._create_steps() + JobStats._sum_step_stats_for_job(job, job.steps) + jobs[job.id] = job + + return jobs + + +cdef class Job: + + def __cinit__(self): + self.ptr = NULL + + def __init__(self, job_id): + self._alloc_impl() + self.ptr.jobid = int(job_id) + + def __dealloc__(self): + self._dealloc_impl() + + def _dealloc_impl(self): + slurmdb_destroy_job_rec(self.ptr) + self.ptr = NULL + + def _alloc_impl(self): + if not self.ptr: + self.ptr = try_xmalloc( + sizeof(slurmdb_job_rec_t)) + if not self.ptr: + raise MemoryError("xmalloc failed for slurmdb_job_rec_t") + + @staticmethod + cdef Job from_ptr(slurmdb_job_rec_t *in_ptr): + cdef Job wrap = Job.__new__(Job) + wrap.ptr = in_ptr + wrap.steps = JobSteps.__new__(JobSteps) + wrap.stats = JobStats() + return wrap + + @staticmethod + def load(job_id, with_script=False, with_env=False): + """Load the information for a specific Job from the Database. + + Args: + job_id (int): + ID of the Job to be loaded. + + Returns: + (pyslurm.db.Job): Returns a new Job instance + + Raises: + RPCError: If requesting the information for the database Job was + not sucessful. 
+ """ + jfilter = JobSearchFilter(ids=[int(job_id)], + with_script=with_script, with_env=with_env) + jobs = Jobs.load(jfilter) + if not jobs or job_id not in jobs: + raise RPCError(msg=f"Job {job_id} does not exist") + + return jobs[job_id] + + def _create_steps(self): + cdef: + JobStep step + SlurmList step_list + SlurmListItem step_ptr + + step_list = SlurmList.wrap(self.ptr.steps, owned=False) + for step_ptr in SlurmList.iter_and_pop(step_list): + step = JobStep.from_ptr(step_ptr.data) + self.steps[step.id] = step + + def as_dict(self): + """Database Job information formatted as a dictionary. + + Returns: + (dict): Database Job information as dict + """ + cdef dict out = instance_to_dict(self) + + if self.stats: + out["stats"] = self.stats.as_dict() + + steps = out.pop("steps", {}) + out["steps"] = {} + for step_id, step in steps.items(): + out["steps"][step_id] = step.as_dict() + + return out + + @property + def account(self): + return cstr.to_unicode(self.ptr.account) + + @property + def admin_comment(self): + return cstr.to_unicode(self.ptr.admin_comment) + + @property + def num_nodes(self): + val = TrackableResources.find_count_in_str(self.ptr.tres_alloc_str, + slurm.TRES_NODE) + if val is not None: + # Job is already running and has nodes allocated + return val + else: + # Job is still pending, so we return the number of requested nodes + # instead. + val = TrackableResources.find_count_in_str(self.ptr.tres_req_str, + slurm.TRES_NODE) + return val + + @property + def array_id(self): + return u32_parse(self.ptr.array_job_id) + + @property + def array_tasks_parallel(self): + return u32_parse(self.ptr.array_max_tasks) + + @property + def array_task_id(self): + return u32_parse(self.ptr.array_task_id) + + @property + def array_tasks_waiting(self): + task_str = cstr.to_unicode(self.ptr.array_task_str) + if not task_str: + return None + + if "%" in task_str: + # We don't want this % character and everything after it + # in here, so remove it. 
+ task_str = task_str[:task_str.rindex("%")] + + return task_str + + @property + def association_id(self): + return u32_parse(self.ptr.associd) + + @property + def block_id(self): + return cstr.to_unicode(self.ptr.blockid) + + @property + def cluster(self): + return cstr.to_unicode(self.ptr.cluster) + + @property + def constraints(self): + return cstr.to_list(self.ptr.constraints) + + @property + def container(self): + return cstr.to_list(self.ptr.container) + + @property + def db_index(self): + return u64_parse(self.ptr.db_index) + + @property + def derived_exit_code(self): + if (self.ptr.derived_ec == slurm.NO_VAL + or not WIFEXITED(self.ptr.derived_ec)): + return None + + return WEXITSTATUS(self.ptr.derived_ec) + + @property + def derived_exit_code_signal(self): + if (self.ptr.derived_ec == slurm.NO_VAL + or not WIFSIGNALED(self.ptr.derived_ec)): + return None + + return WTERMSIG(self.ptr.derived_ec) + + @property + def comment(self): + return cstr.to_unicode(self.ptr.derived_es) + + @property + def elapsed_time(self): + return _raw_time(self.ptr.elapsed) + + @property + def eligible_time(self): + return _raw_time(self.ptr.eligible) + + @property + def end_time(self): + return _raw_time(self.ptr.end) + + @property + def exit_code(self): + # TODO + return 0 + + @property + def exit_code_signal(self): + # TODO + return 0 + + # uint32_t flags + + def group_id(self): + return u32_parse(self.ptr.gid, zero_is_noval=False) + + def group_name(self): + return gid_to_name(self.ptr.gid) + + # uint32_t het_job_id + # uint32_t het_job_offset + + @property + def id(self): + return self.ptr.jobid + + @property + def name(self): + return cstr.to_unicode(self.ptr.jobname) + + # uint32_t lft + + @property + def mcs_label(self): + return cstr.to_unicode(self.ptr.mcs_label) + + @property + def nodelist(self): + return cstr.to_unicode(self.ptr.nodes) + + @property + def partition(self): + return cstr.to_unicode(self.ptr.partition) + + @property + def priority(self): + return 
u32_parse(self.ptr.priority, zero_is_noval=False) + + @property + def quality_of_service(self): + _qos = self.qos_data.get(self.ptr.qosid, None) + if _qos: + return _qos.name + else: + return None + + @property + def cpus(self): + val = TrackableResources.find_count_in_str(self.ptr.tres_alloc_str, + slurm.TRES_CPU) + if val is not None: + # Job is already running and has cpus allocated + return val + else: + # Job is still pending, so we return the number of requested cpus + # instead. + return u32_parse(self.ptr.req_cpus) + + @property + def memory(self): + val = TrackableResources.find_count_in_str(self.ptr.tres_req_str, + slurm.TRES_MEM) + return val + + @property + def reservation(self): + return cstr.to_unicode(self.ptr.resv_name) + +# @property +# def reservation_id(self): +# return u32_parse(self.ptr.resvid) + + @property + def script(self): + return cstr.to_unicode(self.ptr.script) + + @property + def environment(self): + return cstr.to_dict(self.ptr.env, delim1="\n", delim2="=") + + @property + def start_time(self): + return _raw_time(self.ptr.start) + + @property + def state(self): + return cstr.to_unicode(slurm_job_state_string(self.ptr.state)) + + @property + def state_reason(self): + return cstr.to_unicode(slurm_job_reason_string + (self.ptr.state_reason_prev)) + + @property + def cancelled_by(self): + return uid_to_name(self.ptr.requid) + + @property + def submit_time(self): + return _raw_time(self.ptr.submit) + + @property + def submit_command(self): + return cstr.to_unicode(self.ptr.submit_line) + + @property + def suspended_time(self): + return _raw_time(self.ptr.elapsed) + + @property + def system_comment(self): + return cstr.to_unicode(self.ptr.system_comment) + + @property + def time_limit(self): + # TODO: Perhaps we should just find out what the actual PartitionLimit + # is? 
+ return _raw_time(self.ptr.timelimit, "PartitionLimit") + + @property + def user_id(self): + return u32_parse(self.ptr.uid, zero_is_noval=False) + + @property + def user_name(self): + # Theres also a ptr->user + # https://github.com/SchedMD/slurm/blob/6365a8b7c9480c48678eeedef99864d8d3b6a6b5/src/sacct/print.c#L1946 + return uid_to_name(self.ptr.uid) + + # TODO: used gres + + @property + def wckey(self): + return cstr.to_unicode(self.ptr.wckey) + +# @property +# def wckey_id(self): +# return u32_parse(self.ptr.wckeyid) + + @property + def working_directory(self): + return cstr.to_unicode(self.ptr.work_dir) + +# @property +# def tres_allocated(self): +# return TrackableResources.from_str(self.ptr.tres_alloc_str) + +# @property +# def tres_requested(self): +# return TrackableResources.from_str(self.ptr.tres_req_str) diff --git a/pyslurm/core/db/qos.pxd b/pyslurm/core/db/qos.pxd new file mode 100644 index 00000000..3ba59dc6 --- /dev/null +++ b/pyslurm/core/db/qos.pxd @@ -0,0 +1,65 @@ +######################################################################### +# qos.pxd - pyslurm slurmdbd qos api +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm cimport slurm +from pyslurm.slurm cimport ( + slurmdb_qos_rec_t, + slurmdb_qos_cond_t, + slurmdb_destroy_qos_rec, + slurmdb_destroy_qos_cond, + slurmdb_qos_get, + slurm_preempt_mode_num, + try_xmalloc, +) +from pyslurm.core.db.util cimport ( + SlurmList, + SlurmListItem, + make_char_list, +) +from pyslurm.core.db.connection cimport Connection +from pyslurm.core.common cimport cstr + + +cdef class QualitiesOfService(dict): + cdef: + SlurmList info + Connection db_conn + + +cdef class QualityOfServiceSearchFilter: + cdef slurmdb_qos_cond_t *ptr + + cdef public: + names + ids + descriptions + preempt_modes + with_deleted + + +cdef class QualityOfService: + cdef slurmdb_qos_rec_t *ptr + + @staticmethod + cdef QualityOfService from_ptr(slurmdb_qos_rec_t *in_ptr) diff --git a/pyslurm/core/db/qos.pyx b/pyslurm/core/db/qos.pyx new file mode 100644 index 00000000..bd5a35de --- /dev/null +++ b/pyslurm/core/db/qos.pyx @@ -0,0 +1,194 @@ +######################################################################### +# qos.pyx - pyslurm slurmdbd qos api +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm.core.error import RPCError +from pyslurm.core.common import ( + instance_to_dict, +) + + +cdef class QualitiesOfService(dict): + + def __init__(self): + pass + + @staticmethod + def load(search_filter=None, name_is_key=True, db_connection=None): + cdef: + QualitiesOfService qos_dict = QualitiesOfService() + QualityOfService qos + QualityOfServiceSearchFilter cond + SlurmListItem qos_ptr + Connection conn = db_connection + + if search_filter: + cond = search_filter + else: + cond = QualityOfServiceSearchFilter() + + cond._create() + qos_dict.db_conn = Connection.open() if not conn else conn + qos_dict.info = SlurmList.wrap(slurmdb_qos_get(qos_dict.db_conn.ptr, + cond.ptr)) + if qos_dict.info.is_null: + raise RPCError(msg="Failed to get QoS data from slurmdbd") + + for qos_ptr in SlurmList.iter_and_pop(qos_dict.info): + qos = QualityOfService.from_ptr(qos_ptr.data) + if name_is_key: + qos_dict[qos.name] = qos + else: + qos_dict[qos.id] = qos + + return qos_dict + + +cdef class QualityOfServiceSearchFilter: + + def __cinit__(self): + self.ptr = NULL + + def __init__(self, **kwargs): + for k, v in kwargs.items(): + setattr(self, k, v) + + def __dealloc__(self): + self._dealloc() + + def _dealloc(self): + slurmdb_destroy_qos_cond(self.ptr) + self.ptr = NULL + + def _alloc(self): + self._dealloc() + self.ptr = try_xmalloc(sizeof(slurmdb_qos_cond_t)) + if not self.ptr: + raise MemoryError("xmalloc failed for slurmdb_qos_cond_t") + + def _parse_preempt_modes(self): + if not self.preempt_modes: + return 0 + + if isinstance(self.preempt_modes, int): + return self.preempt_modes + + out = 0 + for mode in self.preempt_modes: + _mode = slurm_preempt_mode_num(mode) + 
if _mode == slurm.NO_VAL16: + raise ValueError(f"Unknown preempt mode: {mode}") + + if _mode == slurm.PREEMPT_MODE_OFF: + _mode = slurm.PREEMPT_MODE_COND_OFF + + out |= _mode + + return out + + def _create(self): + self._alloc() + cdef slurmdb_qos_cond_t *ptr = self.ptr + + make_char_list(&ptr.name_list, self.names) + make_char_list(&ptr.id_list, self.ids) + make_char_list(&ptr.description_list, self.descriptions) + ptr.preempt_mode = self._parse_preempt_modes() + ptr.with_deleted = 1 if bool(self.with_deleted) else 0 + + +cdef class QualityOfService: + + def __cinit__(self): + self.ptr = NULL + + def __init__(self, name=None): + self._alloc_impl() + self.name = name + + def __dealloc__(self): + self._dealloc_impl() + + def _dealloc_impl(self): + slurmdb_destroy_qos_rec(self.ptr) + self.ptr = NULL + + def _alloc_impl(self): + if not self.ptr: + self.ptr = try_xmalloc( + sizeof(slurmdb_qos_rec_t)) + if not self.ptr: + raise MemoryError("xmalloc failed for slurmdb_qos_rec_t") + + @staticmethod + cdef QualityOfService from_ptr(slurmdb_qos_rec_t *in_ptr): + cdef QualityOfService wrap = QualityOfService.__new__(QualityOfService) + wrap.ptr = in_ptr + return wrap + + def as_dict(self): + """Database QualityOfService information formatted as a dictionary. + + Returns: + (dict): Database QualityOfService information as dict + """ + return instance_to_dict(self) + + @staticmethod + def load(name): + """Load the information for a specific Quality of Service. + + Args: + name (str): + Name of the Quality of Service to be loaded. + + Returns: + (pyslurm.db.QualityOfService): Returns a new QualityOfService + instance. + + Raises: + RPCError: If requesting the information from the database was not + sucessful. 
+ """ + qfilter = QualityOfServiceSearchFilter(names=[name]) + qos_data = QualitiesOfService.load(qfilter) + if not qos_data or name not in qos_data: + raise RPCError(msg=f"QualityOfService {name} does not exist") + + return qos_data[name] + + @property + def name(self): + return cstr.to_unicode(self.ptr.name) + + @name.setter + def name(self, val): + cstr.fmalloc(&self.ptr.name, val) + + @property + def description(self): + return cstr.to_unicode(self.ptr.description) + + @property + def id(self): + return self.ptr.id diff --git a/pyslurm/core/db/stats.pxd b/pyslurm/core/db/stats.pxd new file mode 100644 index 00000000..1f321ab2 --- /dev/null +++ b/pyslurm/core/db/stats.pxd @@ -0,0 +1,143 @@ +######################################################################### +# stats.pxd - pyslurm slurmdbd job stats +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm cimport slurm +from pyslurm.slurm cimport ( + try_xmalloc, + slurmdb_stats_t, + slurmdb_job_rec_t, +) +from pyslurm.core.db.tres cimport TrackableResources +from pyslurm.core.db.step cimport JobStep, JobSteps +from pyslurm.core.db.job cimport Job +from pyslurm.core.common cimport cstr + + +cdef class JobStats: + """Statistics for a Slurm Job or Step. + + Note: + For more information also see the sacct manpage. + + Attributes: + consumed_energy (int): + Total amount of energy consumed, in joules + elapsed_cpu_time (int): + Total amount of time used(Elapsed time * cpu count) in seconds. + This is not the real CPU-Efficiency, but rather the total amount + of cpu-time the CPUs were occupied for + avg_cpu_time (int): + Average CPU-Time (System + User) in seconds of all tasks + avg_cpu_frequency (int): + Average weighted CPU-Frequency of all tasks, in Kilohertz + avg_disk_read (int): + Average number of bytes read by all tasks + avg_disk_write (int): + Average number of bytes written by all tasks + avg_page_faults (int): + Average number of page faults by all tasks + avg_resident_memory (int): + Average Resident Set Size (RSS) in bytes of all tasks + avg_virtual_memory (int): + Average Virtual Memory Size (VSZ) in bytes of all tasks + max_disk_read (int): + Highest peak number of bytes read by all tasks + max_disk_read_node (int): + Name of the Node where max_disk_read occured + max_disk_read_task (int): + ID of the Task where max_disk_read occured + max_disk_write (int): + Lowest peak number of bytes written by all tasks + max_disk_write_node (int): + Name of the Node where max_disk_write occured + max_disk_write_task (int): + ID of the Task where max_disk_write occured + max_page_faults (int): + Highest peak number of page faults by all tasks + max_page_faults_node (int): + Name of the Node where max_page_faults occured + max_page_faults_task (int): + ID of the 
Task where max_page_faults occured + max_resident_memory (int): + Highest peak Resident Set Size (RSS) in bytes by all tasks + max_resident_memory_node (int): + Name of the Node where max_resident_memory occured + max_resident_memory_task (int): + ID of the Task where max_resident_memory occured + max_virtual_memory (int): + Highest peak Virtual Memory Size (VSZ) in bytes by all tasks + max_virtual_memory_node (int): + Name of the Node where max_virtual_memory occured + max_virtual_memory_task (int): + ID of the Task where max_virtual_memory occured + min_cpu_time (int): + Lowest peak CPU-Time (System + User) in seconds of all tasks + min_cpu_time_node (int): + Name of the Node where min_cpu_time occured + min_cpu_time_task (int): + ID of the Task where min_cpu_time occured + total_cpu_time (int): + Sum of user_cpu_time and system_cpu_time, in seconds + user_cpu_time (int): + Amount of Time spent in user space, in seconds + system_cpu_time (int): + Amount of Time spent in kernel space, in seconds + """ + cdef slurmdb_job_rec_t *job + + cdef public: + consumed_energy + elapsed_cpu_time + avg_cpu_time + avg_cpu_frequency + avg_disk_read + avg_disk_write + avg_page_faults + avg_resident_memory + avg_virtual_memory + max_disk_read + max_disk_read_node + max_disk_read_task + max_disk_write + max_disk_write_node + max_disk_write_task + max_page_faults + max_page_faults_node + max_page_faults_task + max_resident_memory + max_resident_memory_node + max_resident_memory_task + max_virtual_memory + max_virtual_memory_node + max_virtual_memory_task + min_cpu_time + min_cpu_time_node + min_cpu_time_task + total_cpu_time + user_cpu_time + system_cpu_time + + @staticmethod + cdef JobStats from_step(JobStep step) + diff --git a/pyslurm/core/db/stats.pyx b/pyslurm/core/db/stats.pyx new file mode 100644 index 00000000..bd6606a0 --- /dev/null +++ b/pyslurm/core/db/stats.pyx @@ -0,0 +1,207 @@ +######################################################################### +# stats.pyx - 
pyslurm slurmdbd job stats +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm.core.common import ( + nodelist_from_range_str, + instance_to_dict, +) + + +cdef class JobStats: + + def __init__(self): + for attr, val in instance_to_dict(self).items(): + setattr(self, attr, 0) + + self.max_disk_read_node = None + self.max_disk_read_task = None + self.max_disk_write_node = None + self.max_disk_write_task = None + self.max_page_faults_node = None + self.max_page_faults_task = None + self.max_resident_memory_node = None + self.max_resident_memory_task = None + self.max_virtual_memory_node = None + self.max_virtual_memory_task = None + self.min_cpu_time_node = None + self.min_cpu_time_task = None + + def as_dict(self): + return instance_to_dict(self) + + @staticmethod + cdef JobStats from_step(JobStep step): + cdef JobStats wrap = JobStats() + if not &step.ptr.stats: + return wrap + + cdef: + list nodes = nodelist_from_range_str( + cstr.to_unicode(step.ptr.nodes)) + cpu_time_adj = 1000 + slurmdb_stats_t *ptr = &step.ptr.stats + + if ptr.consumed_energy != slurm.NO_VAL64: + 
wrap.consumed_energy = ptr.consumed_energy + + wrap.avg_cpu_time = TrackableResources.find_count_in_str( + ptr.tres_usage_in_ave, slurm.TRES_CPU) / cpu_time_adj + + elapsed = step.elapsed_time if step.elapsed_time else 0 + cpus = step.cpus if step.cpus else 0 + wrap.elapsed_cpu_time = elapsed * cpus + + ave_freq = int(ptr.act_cpufreq) + if ave_freq != slurm.NO_VAL: + wrap.avg_cpu_frequency = ptr.act_cpufreq + + wrap.avg_disk_read = TrackableResources.find_count_in_str( + ptr.tres_usage_in_ave, slurm.TRES_FS_DISK) + wrap.avg_disk_write = TrackableResources.find_count_in_str( + ptr.tres_usage_out_ave, slurm.TRES_FS_DISK) + wrap.avg_page_faults = TrackableResources.find_count_in_str( + ptr.tres_usage_in_ave, slurm.TRES_PAGES) + wrap.avg_resident_memory = TrackableResources.find_count_in_str( + ptr.tres_usage_in_ave, slurm.TRES_MEM) + wrap.avg_virtual_memory = TrackableResources.find_count_in_str( + ptr.tres_usage_in_ave, slurm.TRES_VMEM) + + wrap.max_disk_read = TrackableResources.find_count_in_str( + ptr.tres_usage_in_max, slurm.TRES_FS_DISK) + max_disk_read_nodeid = TrackableResources.find_count_in_str( + ptr.tres_usage_in_max_nodeid, slurm.TRES_FS_DISK) + wrap.max_disk_read_task = TrackableResources.find_count_in_str( + ptr.tres_usage_in_max_taskid, slurm.TRES_FS_DISK) + + wrap.max_disk_write = TrackableResources.find_count_in_str( + ptr.tres_usage_out_max, slurm.TRES_FS_DISK) + max_disk_write_nodeid = TrackableResources.find_count_in_str( + ptr.tres_usage_out_max_nodeid, slurm.TRES_FS_DISK) + wrap.max_disk_write_task = TrackableResources.find_count_in_str( + ptr.tres_usage_out_max_taskid, slurm.TRES_FS_DISK) + + wrap.max_resident_memory = TrackableResources.find_count_in_str( + ptr.tres_usage_in_max, slurm.TRES_MEM) + max_resident_memory_nodeid = TrackableResources.find_count_in_str( + ptr.tres_usage_in_max_nodeid, slurm.TRES_MEM) + wrap.max_resident_memory_task = TrackableResources.find_count_in_str( + ptr.tres_usage_in_max_taskid, slurm.TRES_MEM) + + 
wrap.max_virtual_memory = TrackableResources.find_count_in_str( + ptr.tres_usage_in_max, slurm.TRES_VMEM) + max_virtual_memory_nodeid = TrackableResources.find_count_in_str( + ptr.tres_usage_in_max_nodeid, slurm.TRES_VMEM) + wrap.max_virtual_memory_task = TrackableResources.find_count_in_str( + ptr.tres_usage_in_max_taskid, slurm.TRES_VMEM) + + wrap.min_cpu_time = TrackableResources.find_count_in_str( + ptr.tres_usage_in_min, slurm.TRES_CPU) / cpu_time_adj + min_cpu_time_nodeid = TrackableResources.find_count_in_str( + ptr.tres_usage_in_min_nodeid, slurm.TRES_CPU) + wrap.min_cpu_time_task = TrackableResources.find_count_in_str( + ptr.tres_usage_in_min_taskid, slurm.TRES_CPU) + + wrap.total_cpu_time = TrackableResources.find_count_in_str( + ptr.tres_usage_in_tot, slurm.TRES_CPU) + + if nodes: + wrap.max_disk_write_node = nodes[max_disk_write_nodeid] + wrap.max_disk_read_node = nodes[max_disk_read_nodeid] + wrap.max_resident_memory_node = nodes[max_resident_memory_nodeid] + wrap.max_virtual_memory_node = nodes[max_virtual_memory_nodeid] + wrap.min_cpu_time_node = nodes[min_cpu_time_nodeid] + + if step.ptr.user_cpu_sec != slurm.NO_VAL64: + wrap.user_cpu_time = step.ptr.user_cpu_sec + + if step.ptr.sys_cpu_sec != slurm.NO_VAL64: + wrap.system_cpu_time = step.ptr.sys_cpu_sec + + return wrap + + @staticmethod + def _sum_step_stats_for_job(Job job, JobSteps steps): + cdef: + JobStats job_stats = job.stats + JobStats step_stats = None + + for step in steps.values(): + step_stats = step.stats + + job_stats.consumed_energy += step_stats.consumed_energy + job_stats.avg_cpu_time += step_stats.avg_cpu_time + job_stats.avg_cpu_frequency += step_stats.avg_cpu_frequency + job_stats.avg_disk_read += step_stats.avg_disk_read + job_stats.avg_disk_write += step_stats.avg_disk_write + job_stats.avg_page_faults += step_stats.avg_page_faults + + if step_stats.max_disk_read >= job_stats.max_disk_read: + job_stats.max_disk_read = step_stats.max_disk_read + job_stats.max_disk_read_node = 
step_stats.max_disk_read_node + job_stats.max_disk_read_task = step_stats.max_disk_read_task + + if step_stats.max_disk_write >= job_stats.max_disk_write: + job_stats.max_disk_write = step_stats.max_disk_write + job_stats.max_disk_write_node = step_stats.max_disk_write_node + job_stats.max_disk_write_task = step_stats.max_disk_write_task + + if step_stats.max_page_faults >= job_stats.max_page_faults: + job_stats.max_page_faults = step_stats.max_page_faults + job_stats.max_page_faults_node = step_stats.max_page_faults_node + job_stats.max_page_faults_task = step_stats.max_page_faults_task + + if step_stats.max_resident_memory >= job_stats.max_resident_memory: + job_stats.max_resident_memory = step_stats.max_resident_memory + job_stats.max_resident_memory_node = step_stats.max_resident_memory_node + job_stats.max_resident_memory_task = step_stats.max_resident_memory_task + job_stats.avg_resident_memory = job_stats.max_resident_memory + + if step_stats.max_virtual_memory >= job_stats.max_virtual_memory: + job_stats.max_virtual_memory = step_stats.max_virtual_memory + job_stats.max_virtual_memory_node = step_stats.max_virtual_memory_node + job_stats.max_virtual_memory_task = step_stats.max_virtual_memory_task + job_stats.avg_virtual_memory = job_stats.max_virtual_memory + + if step_stats.min_cpu_time >= job_stats.min_cpu_time: + job_stats.min_cpu_time = step_stats.min_cpu_time + job_stats.min_cpu_time_node = step_stats.min_cpu_time_node + job_stats.min_cpu_time_task = step_stats.min_cpu_time_task + + if job.ptr.tot_cpu_sec != slurm.NO_VAL64: + job_stats.total_cpu_time = job.ptr.tot_cpu_sec + + if job.ptr.user_cpu_sec != slurm.NO_VAL64: + job_stats.user_cpu_time = job.ptr.user_cpu_sec + + if job.ptr.sys_cpu_sec != slurm.NO_VAL64: + job_stats.system_cpu_time = job.ptr.sys_cpu_sec + + elapsed = job.elapsed_time if job.elapsed_time else 0 + cpus = job.cpus if job.cpus else 0 + job_stats.elapsed_cpu_time = elapsed * cpus + + step_count = len(steps) + if step_count: + 
job_stats.avg_cpu_frequency /= step_count + diff --git a/pyslurm/core/db/step.pxd b/pyslurm/core/db/step.pxd new file mode 100644 index 00000000..77d45cd2 --- /dev/null +++ b/pyslurm/core/db/step.pxd @@ -0,0 +1,100 @@ +######################################################################### +# step.pxd - pyslurm slurmdbd step api +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm cimport slurm +from pyslurm.slurm cimport ( + slurmdb_job_rec_t, + slurmdb_job_cond_t, + slurmdb_step_rec_t, + slurmdb_jobs_get, + slurmdb_destroy_job_cond, + slurmdb_destroy_job_rec, + slurmdb_destroy_step_rec, + try_xmalloc, + slurmdb_job_cond_def_start_end, + slurm_job_state_string, + slurm_job_reason_string, +) +from pyslurm.core.db.util cimport SlurmList, SlurmListItem +from pyslurm.core.db.connection cimport Connection +from pyslurm.core.common cimport cstr +from pyslurm.core.db.stats cimport JobStats + + +cdef class JobSteps(dict): + pass + + +cdef class JobStep: + """A Slurm Database Job-step. 
+ + Attributes: + stats (pyslurm.db.JobStats): + Utilization statistics for this Step + num_nodes (int): + Amount of nodes this Step has allocated + cpus (int): + Amount of CPUs the Step has/had allocated + memory (int): + Amount of memory the Step requested + container (str): + Path to OCI Container bundle + elapsed_time (int): + Amount of seconds elapsed for the Step + end_time (int): + When the Step ended, as a unix timestamp + eligible_time (int): + When the Step became eligible to run, as a unix timestamp + start_time (int): + Time when the Step started, as a unix timestamp + exit_code (int): + Exit code of the step + ntasks (int): + Number of tasks the Step uses + cpu_frequency_min (str): + Minimum CPU-Frequency requested for the Step + cpu_frequency_max (str): + Maximum CPU-Frequency requested for the Step + cpu_frequency_governor (str): + CPU-Frequency Governor requested for the Step + nodelist (str): + Nodes this Step is using + id (Union[str, int]): + ID of the Step + job_id (int): + ID of the Job this Step is a part of + state (str): + State of the Step + cancelled_by (str): + Name of the User who cancelled this Step + submit_command (str): + Full command issued to start the Step + suspended_time (int): + Amount of seconds the Step was suspended + """ + cdef slurmdb_step_rec_t *ptr + cdef public JobStats stats + + @staticmethod + cdef JobStep from_ptr(slurmdb_step_rec_t *step) diff --git a/pyslurm/core/db/step.pyx b/pyslurm/core/db/step.pyx new file mode 100644 index 00000000..aa1bd612 --- /dev/null +++ b/pyslurm/core/db/step.pyx @@ -0,0 +1,177 @@ +######################################################################### +# step.pyx - pyslurm slurmdbd step api +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the 
Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from os import WIFSIGNALED, WIFEXITED, WTERMSIG, WEXITSTATUS +from pyslurm.core.error import RPCError +from pyslurm.core.db.tres cimport TrackableResources, TrackableResource +from pyslurm.core.common.uint import * +from pyslurm.core.common.ctime import _raw_time +from pyslurm.core.common import ( + gid_to_name, + uid_to_name, + instance_to_dict, +) +from pyslurm.core.job.util import cpu_freq_int_to_str +from pyslurm.core.job.step import humanize_step_id + + +cdef class JobStep: + + def __cinit__(self): + self.ptr = NULL + + def __init__(self): + raise RuntimeError("You can not instantiate this class directly " + " at the moment") + + def __dealloc__(self): + slurmdb_destroy_step_rec(self.ptr) + self.ptr = NULL + + @staticmethod + cdef JobStep from_ptr(slurmdb_step_rec_t *step): + cdef JobStep wrap = JobStep.__new__(JobStep) + wrap.ptr = step + wrap.stats = JobStats.from_step(wrap) + return wrap + + def as_dict(self): + cdef dict out = instance_to_dict(self) + out["stats"] = self.stats.as_dict() + return out + + @property + def num_nodes(self): + nnodes = u32_parse(self.ptr.nnodes) + if not nnodes and self.ptr.tres_alloc_str: + return TrackableResources.find_count_in_str( + self.ptr.tres_alloc_str, slurm.TRES_NODE) + else: + return nnodes + + @property + def cpus(self): + req_cpus = TrackableResources.find_count_in_str( + 
self.ptr.tres_alloc_str, slurm.TRES_CPU) + + if req_cpus == slurm.INFINITE64: + return 0 + + return req_cpus +# if req_cpus == slurm.INFINITE64 and step.job_ptr: +# tres_alloc_str = cstr.to_unicode(step.job_ptr.tres_alloc_str) +# req_cpus = TrackableResources.find_count_in_str(tres_alloc_str, +# slurm.TRES_CPU) +# if not req_cpus: +# tres_req_str = cstr.to_unicode(step.job_ptr.tres_req_str) +# req_cpus = TrackableResources.find_count_in_str(tres_req_str, +# slurm.TRES_CPU) + + @property + def memory(self): + val = TrackableResources.find_count_in_str(self.ptr.tres_alloc_str, + slurm.TRES_MEM) + return val + + # Only in Parent Job available: + # resvcpu? + + @property + def container(self): + return cstr.to_unicode(self.ptr.container) + + @property + def elapsed_time(self): + # seconds + return _raw_time(self.ptr.elapsed) + + @property + def end_time(self): + return _raw_time(self.ptr.end) + + @property + def eligible_time(self): + return _raw_time(self.ptr.start) + + @property + def start_time(self): + return _raw_time(self.ptr.start) + + @property + def exit_code(self): + # TODO + return None + + @property + def ntasks(self): + return u32_parse(self.ptr.ntasks) + + @property + def cpu_frequency_min(self): + return cpu_freq_int_to_str(self.ptr.req_cpufreq_min) + + @property + def cpu_frequency_max(self): + return cpu_freq_int_to_str(self.ptr.req_cpufreq_max) + + @property + def cpu_frequency_governor(self): + return cpu_freq_int_to_str(self.ptr.req_cpufreq_gov) + + @property + def nodelist(self): + return cstr.to_unicode(self.ptr.nodes) + + @property + def id(self): + return humanize_step_id(self.ptr.step_id.step_id) + + @property + def job_id(self): + return self.ptr.step_id.job_id + + @property + def name(self): + return cstr.to_unicode(self.ptr.stepname) + +# @property +# def distribution(self): +# # ptr.task_dist +# pass + + @property + def state(self): + return cstr.to_unicode(slurm_job_state_string(self.ptr.state)) + + @property + def cancelled_by(self): + 
return uid_to_name(self.ptr.requid) + + @property + def submit_command(self): + return cstr.to_unicode(self.ptr.submit_line) + + @property + def suspended_time(self): + return _raw_time(self.ptr.elapsed) diff --git a/pyslurm/core/db/tres.pxd b/pyslurm/core/db/tres.pxd new file mode 100644 index 00000000..f08bb3df --- /dev/null +++ b/pyslurm/core/db/tres.pxd @@ -0,0 +1,45 @@ +######################################################################### +# tres.pxd - pyslurm slurmdbd tres api +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm cimport slurm +from pyslurm.core.common cimport cstr +from libc.stdint cimport uint64_t +from pyslurm.slurm cimport ( + slurmdb_tres_rec_t, + slurmdb_destroy_tres_rec, + slurmdb_find_tres_count_in_string, + try_xmalloc, +) + + +cdef class TrackableResources(dict): + cdef public raw_str + + @staticmethod + cdef TrackableResources from_str(char *tres_str) + + +cdef class TrackableResource: + cdef slurmdb_tres_rec_t *ptr + + @staticmethod + cdef TrackableResource from_ptr(slurmdb_tres_rec_t *in_ptr) diff --git a/pyslurm/core/db/tres.pyx b/pyslurm/core/db/tres.pyx new file mode 100644 index 00000000..1e77994b --- /dev/null +++ b/pyslurm/core/db/tres.pyx @@ -0,0 +1,112 @@ +######################################################################### +# tres.pyx - pyslurm slurmdbd tres api +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm.core.common.uint import * + + +cdef class TrackableResources(dict): + + def __init__(self): + pass + + @staticmethod + cdef TrackableResources from_str(char *tres_str): + cdef: + TrackableResources tres_collection + TrackableResource tres + str raw_str = cstr.to_unicode(tres_str) + dict tres_dict + + tres_collection = TrackableResources.__new__(TrackableResources) + if not raw_str: + return tres_collection + + tres_collection.raw_str = raw_str + tres_dict = cstr.to_dict(tres_str) + for tres_id, val in tres_dict.items(): + tres = TrackableResource(tres_id) + tres.ptr.count = val + + return tres + + @staticmethod + def find_count_in_str(tres_str, typ): + if not tres_str: + return 0 + + cdef uint64_t tmp + tmp = slurmdb_find_tres_count_in_string(tres_str, typ) + if tmp == slurm.INFINITE64 or tmp == slurm.NO_VAL64: + return 0 + else: + return tmp + + +cdef class TrackableResource: + + def __cinit__(self): + self.ptr = NULL + + def __init__(self, tres_id): + self._alloc_impl() + self.ptr.id = tres_id + + def __dealloc__(self): + self._dealloc_impl() + + def _alloc_impl(self): + if not self.ptr: + self.ptr = try_xmalloc( + sizeof(slurmdb_tres_rec_t)) + if not self.ptr: + raise MemoryError("xmalloc failed for slurmdb_tres_rec_t") + + def _dealloc_impl(self): + slurmdb_destroy_tres_rec(self.ptr) + self.ptr = NULL + + @staticmethod + cdef TrackableResource from_ptr(slurmdb_tres_rec_t *in_ptr): + cdef TrackableResource wrap = TrackableResource.__new__(TrackableResource) + wrap.ptr = in_ptr + return wrap + + @property + def id(self): + return self.ptr.id + + @property + def name(self): + return cstr.to_unicode(self.ptr.name) + + @property + def type(self): + return cstr.to_unicode(self.ptr.type) + + @property + def count(self): + return u64_parse(self.ptr.count) + + # rec_count + # alloc_secs diff --git a/pyslurm/core/db/util.pxd b/pyslurm/core/db/util.pxd new file mode 
100644 index 00000000..deb71ed4 --- /dev/null +++ b/pyslurm/core/db/util.pxd @@ -0,0 +1,65 @@ +######################################################################### +# util.pxd - pyslurm slurmdbd util functions +######################################################################### +# Copyright (C) 2022 Toni Harzendorf +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm cimport slurm +from pyslurm.core.common cimport cstr +from pyslurm.slurm cimport ( + ListIterator, + List, + slurm_list_iterator_create, + slurm_list_iterator_destroy, + slurm_list_iterator_reset, + slurm_list_count, + slurm_list_next, + slurm_list_destroy, + slurm_list_create, + slurm_list_pop, + slurm_list_append, + slurm_xfree_ptr, +) + +cdef slurm_list_to_pylist(List in_list) +cdef make_char_list(List *in_list, vals) + + +cdef class SlurmListItem: + cdef void *data + + @staticmethod + cdef SlurmListItem from_ptr(void *item) + + +cdef class SlurmList: + cdef: + List info + ListIterator itr + + cdef readonly: + owned + int itr_cnt + int cnt + + @staticmethod + cdef SlurmList wrap(List, owned=*) + + @staticmethod + cdef SlurmList create(slurm.ListDelF delf, owned=*) diff --git a/pyslurm/core/db/util.pyx b/pyslurm/core/db/util.pyx new file mode 100644 index 00000000..2560c4b0 --- /dev/null +++ b/pyslurm/core/db/util.pyx @@ -0,0 +1,188 @@ +######################################################################### +# util.pyx - pyslurm slurmdbd util functions +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + + +cdef make_char_list(List *in_list, vals): + if not vals: + return None + + # Make a new SlurmList wrapper with the values + cdef SlurmList slist = SlurmList(vals) + + # Make sure the previous list is deallocated + if in_list[0]: + slurm_list_destroy(in_list[0]) + + # Assign the pointer from slist to in_list, and give up ownership of slist + in_list[0] = slist.info + slist.owned = False + + +cdef slurm_list_to_pylist(List in_list): + return SlurmList.wrap(in_list, owned=False).to_pylist() + + +cdef class SlurmListItem: + + def __cinit__(self): + self.data = NULL + + @staticmethod + cdef SlurmListItem from_ptr(void *item): + cdef SlurmListItem wrap = SlurmListItem.__new__(SlurmListItem) + wrap.data = item + return wrap + + @property + def has_data(self): + if self.data: + return True + else: + return False + + def to_str(self): + # Mostly for debugging purposes. 
Can only be used "safely" if we have + # a char* list + cdef char* entry = self.data + return cstr.to_unicode(entry) + + +cdef class SlurmList: + """Convenience Wrapper around slurms List type""" + def __cinit__(self): + self.info = NULL + self.itr = NULL + self.itr_cnt = 0 + self.cnt = 0 + self.owned = True + + def __init__(self, vals=None): + self.info = slurm_list_create(slurm_xfree_ptr) + self.append(vals) + + def __dealloc__(self): + self._dealloc_itr() + self._dealloc_list() + + def _dealloc_list(self): + if self.info is not NULL and self.owned: + slurm_list_destroy(self.info) + self.cnt = 0 + self.info = NULL + + def _dealloc_itr(self): + if self.itr: + slurm_list_iterator_destroy(self.itr) + self.itr_cnt = 0 + self.itr = NULL + + def __iter__(self): + self._dealloc_itr() + if not self.is_null: + self.itr = slurm_list_iterator_create(self.info) + + return self + + def __next__(self): + if self.is_null or self.is_itr_null: + raise StopIteration + + if self.itr_cnt < self.cnt: + self.itr_cnt += 1 + return SlurmListItem.from_ptr(slurm_list_next(self.itr)) + + self._dealloc_itr() + raise StopIteration + + @staticmethod + def iter_and_pop(SlurmList li): + while li.cnt > 0: + yield SlurmListItem.from_ptr(slurm_list_pop(li.info)) + li.cnt -= 1 + + @staticmethod + cdef SlurmList create(slurm.ListDelF delfunc, owned=True): + cdef SlurmList wrapper = SlurmList.__new__(SlurmList) + wrapper.info = slurm_list_create(delfunc) + wrapper.owned = owned + return wrapper + + @staticmethod + cdef SlurmList wrap(List li, owned=True): + cdef SlurmList wrapper = SlurmList.__new__(SlurmList) + if not li: + return wrapper + + wrapper.info = li + wrapper.cnt = slurm_list_count(li) + wrapper.owned = owned + return wrapper + + def to_pylist(self): + cdef: + SlurmListItem item + list out = [] + + for item in self: + if not item.has_data: + continue + + pystr = cstr.to_unicode(item.data) + if pystr: + out.append(int(pystr) if pystr.isdigit() else pystr) + + return out + + def 
append(self, vals): + cdef char *entry = NULL + + if not vals: + return None + + to_add = vals + if not isinstance(vals, list): + # If it is not a list, then anything that can't be casted to str + # will error below anyways + to_add = [vals] + + for val in to_add: + if val: + entry = NULL + cstr.fmalloc(&entry, str(val)) + slurm_list_append(self.info, entry) + self.cnt += 1 + + @property + def is_itr_null(self): + if not self.itr: + return True + else: + return False + + @property + def is_null(self): + if not self.info: + return True + else: + return False diff --git a/pyslurm/core/error.pyx b/pyslurm/core/error.pyx new file mode 100644 index 00000000..69130abd --- /dev/null +++ b/pyslurm/core/error.pyx @@ -0,0 +1,100 @@ +######################################################################### +# error.pyx - pyslurm error utilities +######################################################################### +# Copyright (C) 2022 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm.core.common cimport cstr +from pyslurm cimport slurm +from pyslurm.slurm cimport slurm_get_errno + + +def slurm_strerror(errno): + """Convert a slurm errno to a string. 
+
+    Args:
+        errno (int):
+            The error number for which the string representation should be
+            returned.
+
+    Returns:
+        (str): String representation of errno.
+    """
+    return cstr.to_unicode(slurm.slurm_strerror(errno))
+
+
+def slurm_errno():
+    """Get the current slurm errno.
+
+    Returns:
+        (int): Current slurm errno
+    """
+    return slurm_get_errno()
+
+
+def get_last_slurm_error():
+    """Get the last slurm error that occurred as a tuple of errno and string.
+
+    Returns:
+        errno (int): The error number
+        errno_str (str): The errno converted to a String
+    """
+    errno = slurm_errno()
+
+    if errno == slurm.SLURM_SUCCESS:
+        return (errno, 'Success')
+    else:
+        return (errno, slurm_strerror(errno))
+
+
+class RPCError(Exception):
+    """Exception for handling Slurm RPC errors.
+
+    Args:
+        errno (int):
+            A slurm error number returned by RPC functions. By default, the
+            last slurm error that occurred is fetched automatically.
+        msg (str):
+            An optional, custom error description. If this is set, the errno
+            will not be translated to its string representation.
+ """ + def __init__(self, errno=slurm.SLURM_ERROR, msg=None): + self.msg = msg + self.errno = errno + + if not msg: + if errno == slurm.SLURM_ERROR: + self.errno, self.msg = get_last_slurm_error() + else: + self.msg = slurm_strerror(errno) + + super().__init__(self.msg) + + +def verify_rpc(errno): + """Verify a Slurm RPC + + Args: + errno (int): + A Slurm error value + """ + if errno != slurm.SLURM_SUCCESS: + raise RPCError(errno) diff --git a/pyslurm/core/job/__init__.pxd b/pyslurm/core/job/__init__.pxd new file mode 100644 index 00000000..e69de29b diff --git a/pyslurm/core/job/__init__.py b/pyslurm/core/job/__init__.py new file mode 100644 index 00000000..ccc396e2 --- /dev/null +++ b/pyslurm/core/job/__init__.py @@ -0,0 +1,3 @@ +from .job import Job, Jobs +from .step import JobStep, JobSteps +from .submission import JobSubmitDescription diff --git a/pyslurm/core/job/job.pxd b/pyslurm/core/job/job.pxd new file mode 100644 index 00000000..c41c8ced --- /dev/null +++ b/pyslurm/core/job/job.pxd @@ -0,0 +1,387 @@ +######################################################################### +# job.pyx - interface to retrieve slurm job informations +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm.core.common cimport cstr, ctime +from pyslurm.core.common.uint cimport * +from pyslurm.core.common.ctime cimport time_t + +from libc.string cimport memcpy, memset +from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t, int64_t +from libc.stdlib cimport free + +from pyslurm.core.job.submission cimport JobSubmitDescription +from pyslurm.core.job.step cimport JobSteps, JobStep + +from pyslurm cimport slurm +from pyslurm.slurm cimport ( + working_cluster_rec, + slurm_msg_t, + job_id_msg_t, + slurm_msg_t_init, + return_code_msg_t, + slurm_send_recv_controller_msg, + slurm_free_return_code_msg, + slurm_free_job_info_msg, + slurm_free_job_info, + slurm_load_job, + slurm_load_jobs, + job_info_msg_t, + slurm_job_info_t, + slurm_job_state_string, + slurm_job_reason_string, + slurm_job_share_string, + slurm_job_batch_script, + slurm_get_job_stdin, + slurm_get_job_stdout, + slurm_get_job_stderr, + slurm_signal_job, + slurm_kill_job, + slurm_resume, + slurm_suspend, + slurm_update_job, + slurm_notify_job, + slurm_requeue, + xfree, + try_xmalloc, +) + + +cdef class Jobs(dict): + """A collection of Job objects. + + Args: + jobs (Union[list, dict], optional): + Jobs to initialize this collection with. + freeze (bool, optional): + Control whether this collection is "frozen" when reloading Job + information. + + Attributes: + memory (int): + Total amount of memory for all Jobs in this collection, in + Mebibytes + cpus (int): + Total amount of cpus for all Jobs in this collection. + ntasks (int): + Total amount of tasks for all Jobs in this collection. + cpu_time (int): + Total amount of CPU-Time used by all the Jobs in the collection. 
+            This is the result of multiplying the run_time with the amount of
+            cpus for each job.
+        freeze (bool):
+            If this is set to True and the reload() method is called, then
+            *ONLY* Jobs that already exist in this collection will be
+            reloaded. New Jobs that are discovered will not be added to this
+            collection, but old Jobs which have already been purged from the
+            Slurm controller's memory will not be removed either.
+            The default is False, so old jobs will be removed, and new Jobs
+            will be added - basically the same behaviour as doing Jobs.load().
+    """
+    cdef:
+        job_info_msg_t *info
+        slurm_job_info_t tmp_info
+
+    cdef public:
+        freeze
+
+
+cdef class Job:
+    """A Slurm Job.
+
+    All attributes in this class are read-only.
+
+    Args:
+        job_id (int):
+            An Integer representing a Job-ID.
+
+    Raises:
+        MemoryError: If malloc fails to allocate memory.
+
+    Attributes:
+        steps (JobSteps):
+            Steps this Job has.
+            Before you can access the Steps data for a Job, you have to call
+            the reload() method of a Job instance or the load_steps() method
+            of a Jobs collection.
+        name (str):
+            Name of the Job
+        id (int):
+            Unique ID of the Job.
+        association_id (int):
+            ID of the Association this Job runs with.
+        account (str):
+            Name of the Account this Job runs with.
+        user_id (int):
+            UID of the User who submitted the Job.
+        user_name (str):
+            Name of the User who submitted the Job.
+        group_id (int):
+            GID of the Group that Job runs under.
+        group_name (str):
+            Name of the Group this Job runs under.
+        priority (int):
+            Priority of the Job.
+        nice (int):
+            Nice Value of the Job.
+        qos (str):
+            QOS Name of the Job.
+        min_cpus_per_node (int):
+            Minimum Amount of CPUs per Node the Job requested.
+        state (str):
+            State this Job is currently in.
+        state_reason (str):
+            A Reason explaining why the Job is in its current state.
+        is_requeueable (bool):
+            Whether the Job is requeuable or not.
+        requeue_count (int):
+            Amount of times the Job has been requeued.
+ is_batch_job (bool): + Whether the Job is a batch job or not. + node_reboot_required (bool): + Whether the Job requires the Nodes to be rebooted first. + dependencies (dict): + Dependencies the Job has to other Jobs. + time_limit (int): + Time-Limit, in minutes, for this Job. + time_limit_min (int): + Minimum Time-Limit in minutes for this Job. + submit_time (int): + Time the Job was submitted, as unix timestamp. + eligible_time (int): + Time the Job is eligible to start, as unix timestamp. + accrue_time (int): + Job accrue time, as unix timestamp + start_time (int): + Time this Job has started execution, as unix timestamp. + resize_time (int): + Time the job was resized, as unix timestamp. + deadline (int): + Time when a pending Job will be cancelled, as unix timestamp. + preempt_eligible_time (int): + Time the Job is eligible for preemption, as unix timestamp. + preempt_time (int): + Time the Job was signaled for preemption, as unix timestamp. + suspend_time (int): + Last Time the Job was suspended, as unix timestamp. + last_sched_evaluation_time (int): + Last time evaluated for Scheduling, as unix timestamp. + pre_suspension_time (int): + Amount of seconds the Job ran prior to suspension, as unix + timestamp + mcs_label (str): + MCS Label for the Job + partition (str): + Name of the Partition the Job runs in. + submit_host (str): + Name of the Host this Job was submitted from. + batch_host (str): + Name of the Host where the Batch-Script is executed. + num_nodes (int): + Amount of Nodes the Job has requested or allocated. + max_nodes (int): + Maximum amount of Nodes the Job has requested. + allocated_nodes (str): + Nodes the Job is currently using. + This is only valid when the Job is running. If the Job is pending, + it will always return None. + required_nodes (str): + Nodes the Job is explicitly requiring to run on. + excluded_nodes (str): + Nodes that are explicitly excluded for execution. 
+ scheduled_nodes (str): + Nodes the Job is scheduled on by the slurm controller. + derived_exit_code (int): + The derived exit code for the Job. + derived_exit_code_signal (int): + Signal for the derived exit code. + exit_code (int): + Code with which the Job has exited. + exit_code_signal (int): + The signal which has led to the exit code of the Job. + batch_constraints (list): + Features that node(s) should have for the batch script. + Controls where it is possible to execute the batch-script of the + job. Also see 'constraints' + federation_origin (str): + Federation Origin + federation_siblings_active (int): + Federation siblings active + federation_siblings_viable (int): + Federation siblings viable + cpus (int): + Total amount of CPUs the Job is using. + If the Job is still pending, this will be the amount of requested + CPUs. + cpus_per_task (int): + Number of CPUs per Task used. + cpus_per_gpu (int): + Number of CPUs per GPU used. + boards_per_node (int): + Number of boards per Node. + sockets_per_board (int): + Number of sockets per board. + sockets_per_node (int): + Number of sockets per node. + cores_per_socket (int): + Number of cores per socket. + threads_per_core (int): + Number of threads per core. + ntasks (int): + Number of parallel processes. + ntasks_per_node (int): + Number of parallel processes per node. + ntasks_per_board (int): + Number of parallel processes per board. + ntasks_per_socket (int): + Number of parallel processes per socket. + ntasks_per_core (int): + Number of parallel processes per core. + ntasks_per_gpu (int): + Number of parallel processes per GPU. + delay_boot_time (int): + https://slurm.schedmd.com/sbatch.html#OPT_delay-boot, in minutes + constraints (list): + A list of features the Job requires nodes to have. + In contrast, the 'batch_constraints' option only focuses on the + initial batch-script placement. 
This option however means features + to restrict the list of nodes a job is able to execute on in + general beyond the initial batch-script. + cluster (str): + Name of the cluster the job is executing on. + cluster_constraints (list): + A List of features that a cluster should have. + reservation (str): + Name of the reservation this Job uses. + resource_sharing (str): + Mode controlling how a job shares resources with others. + requires_contiguous_nodes (bool): + Whether the Job has allocated a set of contiguous nodes. + licenses (list): + List of licenses the Job needs. + network (str): + Network specification for the Job. + command (str): + The command that is executed for the Job. + working_directory (str): + Path to the working directory for this Job. + admin_comment (str): + An arbitrary comment set by an administrator for the Job. + system_comment (str): + An arbitrary comment set by the slurmctld for the Job. + container (str): + The container this Job uses. + comment (str): + An arbitrary comment set for the Job. + standard_input (str): + The path to the file for the standard input stream. + standard_output (str): + The path to the log file for the standard output stream. + standard_error (str): + The path to the log file for the standard error stream. + required_switches (int): + Number of switches required. + max_wait_time_switches (int): + Amount of seconds to wait for the switches. + burst_buffer (str): + Burst buffer specification + burst_buffer_state (str): + Burst buffer state + cpu_frequency_min (Union[str, int]): + Minimum CPU-Frequency requested. + cpu_frequency_max (Union[str, int]): + Maximum CPU-Frequency requested. + cpu_frequency_governor (Union[str, int]): + CPU-Frequency Governor requested. + wckey (str): + Name of the WCKey this Job uses. + mail_user (list): + Users that should receive Mails for this Job. + mail_types (list): + Mail Flags specified by the User. + heterogeneous_id (int): + Heterogeneous job id. 
+        heterogeneous_offset (int):
+            Heterogeneous job offset.
+        temporary_disk_per_node (int):
+            Temporary disk space in Mebibytes available per Node.
+        array_id (int):
+            The master Array-Job ID.
+        array_tasks_parallel (int):
+            Max number of array tasks allowed to run simultaneously.
+        array_task_id (int):
+            Array Task ID of this Job if it is an Array-Job.
+        array_tasks_waiting (str):
+            Array Tasks that are still waiting.
+        end_time (int):
+            Time at which this Job will end, as unix timestamp.
+        run_time (int):
+            Amount of seconds the Job has been running.
+        cores_reserved_for_system (int):
+            Amount of cores reserved for System use only.
+        threads_reserved_for_system (int):
+            Amount of Threads reserved for System use only.
+        memory (int):
+            Total Amount of Memory this Job has, in Mebibytes
+        memory_per_cpu (int):
+            Amount of Memory per CPU this Job has, in Mebibytes
+        memory_per_node (int):
+            Amount of Memory per Node this Job has, in Mebibytes
+        memory_per_gpu (int):
+            Amount of Memory per GPU this Job has, in Mebibytes
+        gres_per_node (dict):
+            Generic Resources (e.g. GPU) this Job is using per Node.
+        profile_types (list):
+            Types for which detailed accounting data is collected.
+        gres_binding (str):
+            Binding Enforcement of a Generic Resource (e.g. GPU).
+        kill_on_invalid_dependency (bool):
+            Whether the Job should be killed on an invalid dependency.
+        spreads_over_nodes (bool):
+            Whether the Job should be spread over as many nodes as possible.
+        power_options (list):
+            Options set for Power Management.
+        is_cronjob (bool):
+            Whether this Job is a cronjob.
+        cronjob_time (str):
+            The time specification for the Cronjob.
+        cpu_time (int):
+            Amount of CPU-Time used by the Job so far.
+            This is the result of multiplying the run_time with the amount of
+            cpus.
+ """ + cdef: + slurm_job_info_t *ptr + dict passwd + dict groups + + cdef public JobSteps steps + + cdef _calc_run_time(self) + + @staticmethod + cdef _swap_data(Job dst, Job src) + + @staticmethod + cdef Job from_ptr(slurm_job_info_t *in_ptr) + diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx new file mode 100644 index 00000000..1e160c80 --- /dev/null +++ b/pyslurm/core/job/job.pyx @@ -0,0 +1,1346 @@ +######################################################################### +# job.pyx - interface to retrieve slurm job informations +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# Note: Some functions in this File are annotated with additional Copyright +# notices. These functions are: +# +# - get_batch_script +# - get_resource_layout_per_node +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from os import WIFSIGNALED, WIFEXITED, WTERMSIG, WEXITSTATUS +import re +from typing import Union +from pyslurm.core.common import cstr, ctime +from pyslurm.core.common.uint import * +from pyslurm.core.job.util import * +from pyslurm.core.error import ( + RPCError, + verify_rpc, + slurm_errno, +) +from pyslurm.core.common.ctime import _raw_time +from pyslurm.core.common import ( + uid_to_name, + gid_to_name, + signal_to_num, + _getgrall_to_dict, + _getpwall_to_dict, + instance_to_dict, + _sum_prop, +) + + +cdef class Jobs(dict): + + def __cinit__(self): + self.info = NULL + + def __dealloc__(self): + slurm_free_job_info_msg(self.info) + + def __init__(self, jobs=None, freeze=False): + self.freeze = freeze + + if isinstance(jobs, dict): + self.update(jobs) + elif jobs is not None: + for job in jobs: + if isinstance(job, int): + self[job] = Job(job) + else: + self[job.id] = job + + @staticmethod + def load(preload_passwd_info=False, freeze=False): + """Retrieve all Jobs from the Slurm controller + + Args: + preload_passwd_info (bool, optional): + Decides whether to query passwd and groups information from + the system. + Could potentially speed up access to attributes of the Job + where a UID/GID is translated to a name. If True, the + information will fetched and stored in each of the Job + instances. + freeze (bool, optional): + Decide whether this collection of Jobs should be "frozen". + + Returns: + (Jobs): A collection of Job objects. + + Raises: + RPCError: When getting all the Jobs from the slurmctld failed. + MemoryError: If malloc fails to allocate memory. 
+ """ + cdef: + dict passwd = {} + dict groups = {} + Jobs jobs = Jobs.__new__(Jobs) + int flags = slurm.SHOW_ALL | slurm.SHOW_DETAIL + Job job + + verify_rpc(slurm_load_jobs(0, &jobs.info, flags)) + + # If requested, preload the passwd and groups database to potentially + # speedup lookups for an attribute in a Job, e.g. user_name or + # group_name. + if preload_passwd_info: + passwd = _getpwall_to_dict() + groups = _getgrall_to_dict() + + # zero-out a dummy job_step_info_t + memset(&jobs.tmp_info, 0, sizeof(slurm_job_info_t)) + + # Put each job pointer into its own "Job" instance. + for cnt in range(jobs.info.record_count): + job = Job.from_ptr(&jobs.info.job_array[cnt]) + + # Prevent double free if xmalloc fails mid-loop and a MemoryError + # is raised by replacing it with a zeroed-out slurm_job_info_t. + jobs.info.job_array[cnt] = jobs.tmp_info + + if preload_passwd_info: + job.passwd = passwd + job.groups = groups + + jobs[job.id] = job + + # At this point we memcpy'd all the memory for the Jobs. Setting this + # to 0 will prevent the slurm job free function to deallocate the + # memory for the individual jobs. This should be fine, because they + # are free'd automatically in __dealloc__ since the lifetime of each + # job-pointer is tied to the lifetime of its corresponding "Job" + # instance. + jobs.info.record_count = 0 + + jobs.freeze = freeze + return jobs + + def reload(self): + """Reload the information for jobs in a collection. + + Raises: + RPCError: When getting the Jobs from the slurmctld failed. + """ + cdef Jobs reloaded_jobs = Jobs.load() + + for jid in list(self.keys()): + if jid in reloaded_jobs: + # Put the new data in. + self[jid] = reloaded_jobs[jid] + elif not self.freeze: + # Remove this instance from the current collection, as the Job + # doesn't exist anymore. 
+ del self[jid] + + if not self.freeze: + for jid in reloaded_jobs: + if jid not in self: + self[jid] = reloaded_jobs[jid] + + return self + + def load_steps(self): + """Load all Job steps for this collection of Jobs. + + This function fills in the "steps" attribute for all Jobs in the + collection. + + Note: + Pending Jobs will be ignored, since they don't have any Steps yet. + + Raises: + RPCError: When retrieving the Job information for all the Steps + failed. + """ + cdef dict step_info = JobSteps.load_all() + + for jid in self: + # Ignore any Steps from Jobs which do not exist in this + # collection. + if jid in step_info: + self[jid].steps = step_info[jid] + + def as_list(self): + """Format the information as list of Job objects. + + Returns: + (list): List of Job objects + """ + return list(self.values()) + + @property + def memory(self): + return _sum_prop(self, Job.memory) + + @property + def cpus(self): + return _sum_prop(self, Job.cpus) + + @property + def ntasks(self): + return _sum_prop(self, Job.ntasks) + + @property + def cpu_time(self): + return _sum_prop(self, Job.cpu_time) + + +cdef class Job: + + def __cinit__(self): + self.ptr = NULL + + def __init__(self, job_id): + self._alloc_impl() + self.ptr.job_id = job_id + self.passwd = {} + self.groups = {} + self.steps = JobSteps.__new__(JobSteps) + + def _alloc_impl(self): + if not self.ptr: + self.ptr = try_xmalloc(sizeof(slurm_job_info_t)) + if not self.ptr: + raise MemoryError("xmalloc failed for job_info_t") + + def _dealloc_impl(self): + slurm_free_job_info(self.ptr) + self.ptr = NULL + + def __dealloc__(self): + self._dealloc_impl() + + def __eq__(self, other): + return isinstance(other, Job) and self.id == other.id + + @staticmethod + def load(job_id): + """Load information for a specific Job. + + Implements the slurm_load_job RPC. + + Note: + If the Job is not pending, the related Job steps will also be + loaded. + + Args: + job_id (int): + An Integer representing a Job-ID. 
+ + Returns: + (pyslurm.Job): Returns a new Job instance + + Raises: + RPCError: If requesting the Job information from the slurmctld was + not successful. + MemoryError: If malloc failed to allocate memory. + + Examples: + >>> import pyslurm + >>> job = pyslurm.Job.load(9999) + """ + cdef: + job_info_msg_t *info = NULL + Job wrap = Job.__new__(Job) + + try: + verify_rpc(slurm_load_job(&info, job_id, slurm.SHOW_DETAIL)) + + if info and info.record_count: + # Copy info + wrap._alloc_impl() + memcpy(wrap.ptr, &info.job_array[0], sizeof(slurm_job_info_t)) + info.record_count = 0 + + if not slurm.IS_JOB_PENDING(wrap.ptr): + # Just ignore if the steps couldn't be loaded here. + try: + wrap.steps = JobSteps._load(wrap) + except RPCError: + pass + else: + raise RPCError(msg=f"RPC was successful but got no job data, " + "this should never happen") + except Exception as e: + raise e + finally: + slurm_free_job_info_msg(info) + + return wrap + + @staticmethod + cdef Job from_ptr(slurm_job_info_t *in_ptr): + cdef Job wrap = Job.__new__(Job) + wrap._alloc_impl() + wrap.passwd = {} + wrap.groups = {} + wrap.steps = JobSteps.__new__(JobSteps) + memcpy(wrap.ptr, in_ptr, sizeof(slurm_job_info_t)) + + return wrap + + cdef _swap_data(Job dst, Job src): + cdef slurm_job_info_t *tmp = NULL + if dst.ptr and src.ptr: + tmp = dst.ptr + dst.ptr = src.ptr + src.ptr = tmp + + def as_dict(self): + """Job information formatted as a dictionary. + + Returns: + (dict): Job information as dict + """ + return instance_to_dict(self) + + def send_signal(self, signal, steps="children", hurry=False): + """Send a signal to a running Job. + + Implements the slurm_signal_job RPC. + + Args: + signal (Union[str, int]): + Any valid signal which will be sent to the Job. Can be either + a str like 'SIGUSR1', or simply an int. + steps (str): + Selects which steps should be signaled. Valid values for this + are: "all", "batch" and "children". 
The default value is + "children", where all steps except the batch-step will be + signaled. + The value "batch" in contrast means, that only the batch-step + will be signaled. With "all" every step is signaled. + hurry (bool): + If True, no burst buffer data will be staged out. The default + value is False. + + Raises: + RPCError: When sending the signal was not successful. + + Examples: + Specifying the signal as a string: + + >>> from pyslurm import Job + >>> Job(9999).send_signal("SIGUSR1") + + or passing in a numeric signal: + + >>> Job(9999).send_signal(9) + """ + cdef uint16_t flags = 0 + + if steps.casefold() == "all": + flags |= slurm.KILL_FULL_JOB + elif steps.casefold() == "batch": + flags |= slurm.KILL_JOB_BATCH + + if hurry: + flags |= slurm.KILL_HURRY + + sig = signal_to_num(signal) + slurm_kill_job(self.id, sig, flags) + + # Ignore errors when the Job is already done or when SIGKILL was + # specified and the job id is already purged from slurmctlds memory. + errno = slurm_errno() + if (errno == slurm.ESLURM_ALREADY_DONE + or errno == slurm.ESLURM_INVALID_JOB_ID and sig == 9): + pass + else: + verify_rpc(errno) + + def cancel(self): + """Cancel a Job. + + Implements the slurm_kill_job RPC. + + Raises: + RPCError: When cancelling the Job was not successful. + + Examples: + >>> from pyslurm import Job + >>> Job(9999).cancel() + """ + self.send_signal(9) + + def suspend(self): + """Suspend a running Job. + + Implements the slurm_suspend RPC. + + Raises: + RPCError: When suspending the Job was not successful. 
+ + Examples: + >>> from pyslurm import Job + >>> Job(9999).suspend() + """ + # TODO: Report as a misbehaviour to schedmd that slurm_suspend is not + # correctly returning error code when it cannot find the job in + # _slurm_rpc_suspend it should return ESLURM_INVALID_JOB_ID, but + # returns -1 + # https://github.com/SchedMD/slurm/blob/master/src/slurmctld/proc_req.c#L4693 + verify_rpc(slurm_suspend(self.id)) + + def unsuspend(self): + """Unsuspend a currently suspended Job. + + Implements the slurm_resume RPC. + + Raises: + RPCError: When unsuspending the Job was not successful. + + Examples: + >>> from pyslurm import Jobs + >>> Job(9999).unsuspend() + """ + # Same problem as described in suspend() + verify_rpc(slurm_resume(self.id)) + + def modify(self, JobSubmitDescription changes): + """Modify a Job. + + Implements the slurm_update_job RPC. + + Args: + changes (JobSubmitDescription): + A JobSubmitDescription object which contains all the + modifications that should be done on the Job. + + Raises: + RPCError: When updating the Job was not successful. + + Examples: + >>> from pyslurm import Job, JobSubmitDescription + >>> + >>> # Setting the new time-limit to 20 days + >>> changes = JobSubmitDescription(time_limit="20-00:00:00") + >>> Job(9999).modify(changes) + """ + changes._create_job_submit_desc(is_update=True) + changes.ptr.job_id = self.id + verify_rpc(slurm_update_job(changes.ptr)) + + def hold(self, mode=None): + """Hold a currently pending Job, preventing it from being scheduled. + + Args: + mode (str): + Determines in which mode the Job should be held. Possible + values are "user" or "admin". By default, the Job is held in + "admin" mode, meaning only an Administrator will be able to + release the Job again. If you specify the mode as "user", the + User will also be able to release the job. + + Note: + Uses the modify() function to set the Job's priority to 0. + + Raises: + RPCError: When holding the Job was not successful. 
+ + Examples: + >>> from pyslurm import Job + >>> + >>> # Holding a Job (in "admin" mode by default) + >>> Job(9999).hold() + >>> + >>> # Holding a Job in "user" mode + >>> Job(9999).hold(mode="user") + """ + cdef JobSubmitDescription job_sub = JobSubmitDescription(priority=0) + + if mode and mode.casefold() == "user": + job_sub.ptr.alloc_sid = slurm.ALLOC_SID_USER_HOLD + + self.modify(job_sub) + + def release(self): + """Release a currently held Job, allowing it to be scheduled again. + + Note: + Uses the modify() function to reset the priority back to + be controlled by the slurmctld's priority calculation routine. + + Raises: + RPCError: When releasing a held Job was not successful. + + Examples: + >>> from pyslurm import Job + >>> Job(9999).release() + """ + self.modify(JobSubmitDescription(priority=slurm.INFINITE)) + + def requeue(self, hold=False): + """Requeue a currently running Job. + + Implements the slurm_requeue RPC. + + Args: + hold (bool): + Controls whether the Job should be put in a held state or not. + Default for this is 'False', so it will not be held. + + Raises: + RPCError: When requeing the Job was not successful. + + Examples: + >>> from pyslurm import Job + >>> + >>> # Requeing a Job while allowing it to be + >>> # scheduled again immediately + >>> Job(9999).requeue() + >>> + >>> # Requeing a Job while putting it in a held state + >>> Job(9999).requeue(hold=True) + """ + cdef uint32_t flags = 0 + + if hold: + flags |= slurm.JOB_REQUEUE_HOLD + + verify_rpc(slurm_requeue(self.id, flags)) + + def notify(self, msg): + """Sends a message to the Jobs stdout. + + Implements the slurm_notify_job RPC. + + Args: + msg (str): + The message that should be sent. + + Raises: + RPCError: When sending the message to the Job was not successful. 
+ + Examples: + >>> from pyslurm import Job + >>> Job(9999).notify("Hello Friends!") + """ + verify_rpc(slurm_notify_job(self.id, msg)) + + def get_batch_script(self): + """Return the content of the script for a Batch-Job. + + Note: + The string returned also includes all the "\n" characters + (new-line). + + Returns: + (str): The content of the batch script. + + Raises: + RPCError: When retrieving the Batch-Script for the Job was not + successful. + + Examples: + >>> from pyslurm import Job + >>> script = Job(9999).get_batch_script() + """ + # The code for this function was taken from here: + # https://github.com/SchedMD/slurm/blob/7162f15af8deaf02c3bbf940d59e818cdeb5c69d/src/api/job_info.c#L1319 + # and therefore reimplements the slurm_job_batch_script API call, with + # slight modifications (e.g. Cython syntax). Otherwise we would have + # to parse the FILE* ptr we get from it back into a char* which + # would be a bit silly. + # + # The copyright notices for the file this function was taken from is + # included below: + # + # Portions Copyright (C) 2010-2017 SchedMD LLC . + # Copyright (C) 2002-2007 The Regents of the University of California. + # Copyright (C) 2008-2010 Lawrence Livermore National Security. + # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + # Written by Morris Jette et. al. + # CODE-OCEC-09-009. All rights reserved. + # + # Slurm is licensed under the GNU General Public License. 
For the full + # text of Slurm's License, please see here: + # pyslurm/slurm/SLURM_LICENSE + # + # Please, as mentioned above, also have a look at Slurm's DISCLAIMER + # under pyslurm/slurm/SLURM_DISCLAIMER + cdef: + job_id_msg_t msg + slurm_msg_t req + slurm_msg_t resp + int rc = slurm.SLURM_SUCCESS + str script = None + + slurm_msg_t_init(&req) + slurm_msg_t_init(&resp) + + memset(&msg, 0, sizeof(msg)) + msg.job_id = self.id + req.msg_type = slurm.REQUEST_BATCH_SCRIPT + req.data = &msg + + rc = slurm_send_recv_controller_msg(&req, &resp, working_cluster_rec) + verify_rpc(rc) + + if resp.msg_type == slurm.RESPONSE_BATCH_SCRIPT: + script = cstr.to_unicode(resp.data) + xfree(resp.data) + elif resp.msg_type == slurm.RESPONSE_SLURM_RC: + rc = ( resp.data).return_code + slurm_free_return_code_msg(resp.data) + verify_rpc(rc) + else: + verify_rpc(slurm.SLURM_ERROR) + + return script + + @property + def name(self): + return cstr.to_unicode(self.ptr.name) + + @property + def id(self): + return self.ptr.job_id + + @property + def association_id(self): + return u32_parse(self.ptr.assoc_id) + + @property + def account(self): + return cstr.to_unicode(self.ptr.account) + + @property + def user_id(self): + return u32_parse(self.ptr.user_id, zero_is_noval=False) + + @property + def user_name(self): + return uid_to_name(self.ptr.user_id, lookup=self.passwd) + + @property + def group_id(self): + return u32_parse(self.ptr.group_id, zero_is_noval=False) + + @property + def group_name(self): + return gid_to_name(self.ptr.group_id, lookup=self.groups) + + @property + def priority(self): + return u32_parse(self.ptr.priority, zero_is_noval=False) + + @property + def nice(self): + if self.ptr.nice == slurm.NO_VAL: + return None + + return self.ptr.nice - slurm.NICE_OFFSET + + @property + def qos(self): + return cstr.to_unicode(self.ptr.qos) + + @property + def min_cpus_per_node(self): + return u32_parse(self.ptr.pn_min_cpus) + + # I don't think this is used anymore - there is no way in 
sbatch to ask + # for a "maximum cpu" count, so it will always be empty. + # @property + # def max_cpus(self): + # """Maximum Amount of CPUs the Job requested.""" + # return u32_parse(self.ptr.max_cpus) + + @property + def state(self): + return cstr.to_unicode(slurm_job_state_string(self.ptr.job_state)) + + @property + def state_reason(self): + if self.ptr.state_desc: + return cstr.to_unicode(self.ptr.state_desc) + + return cstr.to_unicode(slurm_job_reason_string(self.ptr.state_reason)) + + @property + def is_requeueable(self): + return u16_parse_bool(self.ptr.requeue) + + @property + def requeue_count(self): + return u16_parse(self.ptr.restart_cnt, on_noval=0) + + @property + def is_batch_job(self): + return u16_parse_bool(self.ptr.batch_flag) + + @property + def requires_node_reboot(self): + return u8_parse_bool(self.ptr.reboot) + + @property + def dependencies(self): + return dependency_str_to_dict(cstr.to_unicode(self.ptr.dependency)) + + @property + def time_limit(self): + return _raw_time(self.ptr.time_limit) + + @property + def time_limit_min(self): + return _raw_time(self.ptr.time_min) + + @property + def submit_time(self): + return _raw_time(self.ptr.submit_time) + + @property + def eligible_time(self): + return _raw_time(self.ptr.eligible_time) + + @property + def accrue_time(self): + return _raw_time(self.ptr.accrue_time) + + @property + def start_time(self): + return _raw_time(self.ptr.start_time) + + @property + def resize_time(self): + return _raw_time(self.ptr.resize_time) + + @property + def deadline(self): + return _raw_time(self.ptr.deadline) + + @property + def preempt_eligible_time(self): + return _raw_time(self.ptr.preemptable_time) + + @property + def preempt_time(self): + return _raw_time(self.ptr.preempt_time) + + @property + def suspend_time(self): + return _raw_time(self.ptr.suspend_time) + + @property + def last_sched_evaluation_time(self): + return _raw_time(self.ptr.last_sched_eval) + + @property + def pre_suspension_time(self): + 
return _raw_time(self.ptr.pre_sus_time) + + @property + def mcs_label(self): + return cstr.to_unicode(self.ptr.mcs_label) + + @property + def partition(self): + return cstr.to_unicode(self.ptr.partition) + + @property + def submit_host(self): + return cstr.to_unicode(self.ptr.alloc_node) + + @property + def batch_host(self): + return cstr.to_unicode(self.ptr.batch_host) + + @property + def num_nodes(self): + return u32_parse(self.ptr.num_nodes) + + @property + def max_nodes(self): + return u32_parse(self.ptr.max_nodes) + + @property + def allocated_nodes(self): + return cstr.to_unicode(self.ptr.nodes) + + @property + def required_nodes(self): + return cstr.to_unicode(self.ptr.req_nodes) + + @property + def excluded_nodes(self): + return cstr.to_unicode(self.ptr.exc_nodes) + + @property + def scheduled_nodes(self): + return cstr.to_unicode(self.ptr.sched_nodes) + + @property + def derived_exit_code(self): + if (self.ptr.derived_ec == slurm.NO_VAL + or not WIFEXITED(self.ptr.derived_ec)): + return None + + return WEXITSTATUS(self.ptr.derived_ec) + + @property + def derived_exit_code_signal(self): + if (self.ptr.derived_ec == slurm.NO_VAL + or not WIFSIGNALED(self.ptr.derived_ec)): + return None + + return WTERMSIG(self.ptr.derived_ec) + + @property + def exit_code(self): + if (self.ptr.exit_code == slurm.NO_VAL + or not WIFEXITED(self.ptr.exit_code)): + return None + + return WEXITSTATUS(self.ptr.exit_code) + + @property + def exit_code_signal(self): + if (self.ptr.exit_code == slurm.NO_VAL + or not WIFSIGNALED(self.ptr.exit_code)): + return None + + return WTERMSIG(self.ptr.exit_code) + + @property + def batch_constraints(self): + return cstr.to_list(self.ptr.batch_features) + + @property + def federation_origin(self): + return cstr.to_unicode(self.ptr.fed_origin_str) + + @property + def federation_siblings_active(self): + return u64_parse(self.ptr.fed_siblings_active) + + @property + def federation_siblings_viable(self): + return 
u64_parse(self.ptr.fed_siblings_viable) + + @property + def cpus(self): + return u32_parse(self.ptr.num_cpus, on_noval=1) + + @property + def cpus_per_task(self): + if self.ptr.cpus_per_tres: + return None + + return u16_parse(self.ptr.cpus_per_task, on_noval=1) + + @property + def cpus_per_gpu(self): + if (not self.ptr.cpus_per_tres + or self.ptr.cpus_per_task != slurm.NO_VAL16): + return None + + # TODO: Make a function that, given a GRES type, safely extracts its + # value from the string. + val = cstr.to_unicode(self.ptr.cpus_per_tres).split(":")[2] + return u16_parse(val) + + @property + def boards_per_node(self): + return u16_parse(self.ptr.boards_per_node) + + @property + def sockets_per_board(self): + return u16_parse(self.ptr.sockets_per_board) + + @property + def sockets_per_node(self): + return u16_parse(self.ptr.sockets_per_node) + + @property + def cores_per_socket(self): + return u16_parse(self.ptr.cores_per_socket) + + @property + def threads_per_core(self): + return u16_parse(self.ptr.threads_per_core) + + @property + def ntasks(self): + return u32_parse(self.ptr.num_tasks, on_noval=1) + + @property + def ntasks_per_node(self): + return u16_parse(self.ptr.ntasks_per_node) + + @property + def ntasks_per_board(self): + return u16_parse(self.ptr.ntasks_per_board) + + @property + def ntasks_per_socket(self): + return u16_parse(self.ptr.ntasks_per_socket) + + @property + def ntasks_per_core(self): + return u16_parse(self.ptr.ntasks_per_core) + + @property + def ntasks_per_gpu(self): + return u16_parse(self.ptr.ntasks_per_tres) + + @property + def delay_boot_time(self): + return _raw_time(self.ptr.delay_boot) + + @property + def constraints(self): + return cstr.to_list(self.ptr.features) + + @property + def cluster(self): + return cstr.to_unicode(self.ptr.cluster) + + @property + def cluster_constraints(self): + return cstr.to_list(self.ptr.cluster_features) + + @property + def reservation(self): + return cstr.to_unicode(self.ptr.resv_name) + + @property 
+ def resource_sharing(self): + return cstr.to_unicode(slurm_job_share_string(self.ptr.shared)) + + @property + def requires_contiguous_nodes(self): + return u16_parse_bool(self.ptr.contiguous) + + @property + def licenses(self): + return cstr.to_list(self.ptr.licenses) + + @property + def network(self): + return cstr.to_unicode(self.ptr.network) + + @property + def command(self): + return cstr.to_unicode(self.ptr.command) + + @property + def working_directory(self): + return cstr.to_unicode(self.ptr.work_dir) + + @property + def admin_comment(self): + return cstr.to_unicode(self.ptr.admin_comment) + + @property + def system_comment(self): + return cstr.to_unicode(self.ptr.system_comment) + + @property + def container(self): + return cstr.to_unicode(self.ptr.container) + + @property + def comment(self): + return cstr.to_unicode(self.ptr.comment) + + @property + def standard_input(self): + cdef char tmp[1024] + slurm_get_job_stdin(tmp, sizeof(tmp), self.ptr) + return cstr.to_unicode(tmp) + + @property + def standard_output(self): + cdef char tmp[1024] + slurm_get_job_stdout(tmp, sizeof(tmp), self.ptr) + return cstr.to_unicode(tmp) + + @property + def standard_error(self): + cdef char tmp[1024] + slurm_get_job_stderr(tmp, sizeof(tmp), self.ptr) + return cstr.to_unicode(tmp) + + @property + def required_switches(self): + return u32_parse(self.ptr.req_switch) + + @property + def max_wait_time_switches(self): + return _raw_time(self.ptr.wait4switch) + + @property + def burst_buffer(self): + return cstr.to_unicode(self.ptr.burst_buffer) + + @property + def burst_buffer_state(self): + return cstr.to_unicode(self.ptr.burst_buffer_state) + + @property + def cpu_frequency_min(self): + return cpu_freq_int_to_str(self.ptr.cpu_freq_min) + + @property + def cpu_frequency_max(self): + return cpu_freq_int_to_str(self.ptr.cpu_freq_max) + + @property + def cpu_frequency_governor(self): + return cpu_freq_int_to_str(self.ptr.cpu_freq_gov) + + # @property + # def tres_bindings(self): + 
# """str: ?""" + # # TODO: Find out how it works + # return cstr.to_unicode(self.ptr.tres_bind) + + # @property + # def tres_frequency(self): + # """?""" + # # TODO: Find out how it works + # return cstr.to_unicode(self.ptr.tres_freq) + + @property + def wckey(self): + return cstr.to_unicode(self.ptr.wckey) + + @property + def mail_user(self): + return cstr.to_list(self.ptr.mail_user) + + @property + def mail_types(self): + return mail_type_int_to_list(self.ptr.mail_type) + + @property + def heterogeneous_id(self): + return u32_parse(self.ptr.het_job_id, noval=0) + + @property + def heterogeneous_offset(self): + return u32_parse(self.ptr.het_job_offset, noval=0) + + # @property + # def hetjob_component_ids(self): + # """str: ?""" + # # TODO: Find out how to parse it in a more proper way? + # return cstr.to_unicode(self.ptr.het_job_id_set) + + @property + def temporary_disk_per_node(self): + return u32_parse(self.ptr.pn_min_tmp_disk) + + @property + def array_id(self): + return u32_parse(self.ptr.array_job_id) + + @property + def array_tasks_parallel(self): + return u32_parse(self.ptr.array_max_tasks) + + @property + def array_task_id(self): + return u32_parse(self.ptr.array_task_id) + + @property + def array_tasks_waiting(self): + task_str = cstr.to_unicode(self.ptr.array_task_str) + if not task_str: + return None + + if "%" in task_str: + # We don't want this % character and everything after it + # in here, so remove it. 
+ task_str = task_str[:task_str.rindex("%")] + + return task_str + + @property + def end_time(self): + return _raw_time(self.ptr.end_time) + + # https://github.com/SchedMD/slurm/blob/d525b6872a106d32916b33a8738f12510ec7cf04/src/api/job_info.c#L480 + cdef _calc_run_time(self): + cdef time_t rtime + cdef time_t etime + + if slurm.IS_JOB_PENDING(self.ptr) or not self.ptr.start_time: + return 0 + elif slurm.IS_JOB_SUSPENDED(self.ptr): + return self.pre_suspension_time + else: + if slurm.IS_JOB_RUNNING(self.ptr) or self.ptr.end_time == 0: + etime = ctime.time(NULL) + else: + etime = self.ptr.end_time + + if self.ptr.suspend_time: + rtime = ctime.difftime(etime, self.ptr.suspend_time) + rtime += self.ptr.pre_sus_time + else: + rtime = ctime.difftime(etime, self.ptr.start_time) + + return u64_parse(rtime, on_noval=0) + + @property + def run_time(self): + return self._calc_run_time() + + @property + def cores_reserved_for_system(self): + if self.ptr.core_spec != slurm.NO_VAL16: + if not self.ptr.core_spec & slurm.CORE_SPEC_THREAD: + return self.ptr.core_spec + + @property + def threads_reserved_for_system(self): + if self.ptr.core_spec != slurm.NO_VAL16: + if self.ptr.core_spec & slurm.CORE_SPEC_THREAD: + return self.ptr.core_spec & (~slurm.CORE_SPEC_THREAD) + + @property + def memory(self): + mem_cpu = self.memory_per_cpu + if mem_cpu is not None: + total_cpus = self.cpus + if total_cpus is not None: + mem_cpu *= total_cpus + return mem_cpu + + mem_node = self.memory_per_node + if mem_node is not None: + num_nodes = self.min_nodes + if num_nodes is not None: + mem_node *= num_nodes + return mem_cpu + + # TODO + # mem_gpu = self.memory_per_gpu + # if mem_gpu is not None: + # num_nodes = self.min_nodes + # if num_nodes is not None: + # mem_node *= num_nodes + # return mem_cpu + + return None + + @property + def memory_per_cpu(self): + if self.ptr.pn_min_memory != slurm.NO_VAL64: + if self.ptr.pn_min_memory & slurm.MEM_PER_CPU: + mem = self.ptr.pn_min_memory & 
(~slurm.MEM_PER_CPU) + return u64_parse(mem) + else: + return None + + @property + def memory_per_node(self): + if self.ptr.pn_min_memory != slurm.NO_VAL64: + if not self.ptr.pn_min_memory & slurm.MEM_PER_CPU: + return u64_parse(self.ptr.pn_min_memory) + else: + return None + + @property + def memory_per_gpu(self): + if self.ptr.mem_per_tres and self.ptr.pn_min_memory == slurm.NO_VAL64: + # TODO: Make a function that, given a GRES type, safely extracts + # its value from the string. + mem = int(cstr.to_unicode(self.ptr.mem_per_tres).split(":")[2]) + return u64_parse(mem) + else: + return None + + @property + def gres_per_node(self): + return cstr.to_gres_dict(self.ptr.tres_per_node) + + @property + def profile_types(self): + return acctg_profile_int_to_list(self.ptr.profile) + + @property + def gres_binding(self): + if self.ptr.bitflags & slurm.GRES_ENFORCE_BIND: + return "enforce-binding" + elif self.ptr.bitflags & slurm.GRES_DISABLE_BIND: + return "disable-binding" + else: + return None + + @property + def kill_on_invalid_dependency(self): + return u64_parse_bool_flag(self.ptr.bitflags, slurm.KILL_INV_DEP) + + @property + def spreads_over_nodes(self): + return u64_parse_bool_flag(self.ptr.bitflags, slurm.SPREAD_JOB) + + @property + def power_options(self): + return power_type_int_to_list(self.ptr.power_flags) + + @property + def is_cronjob(self): + return u64_parse_bool_flag(self.ptr.bitflags, slurm.CRON_JOB) + + @property + def cronjob_time(self): + return cstr.to_unicode(self.ptr.cronspec) + + @property + def cpu_time(self): + return self.cpus * self.run_time + + @property + def pending_time(self): + # TODO + return None + + @property + def run_time_left(self): + # TODO + return None + + def get_resource_layout_per_node(self): + """Retrieve the resource layout of this Job on each node. 
+ + This contains the following information: + * cpu_ids (str) + * gres (dict) + * memory (int) + + Returns: + (dict): Resource layout + """ + # The code for this function is a modified reimplementation from here: + # https://github.com/SchedMD/slurm/blob/d525b6872a106d32916b33a8738f12510ec7cf04/src/api/job_info.c#L739 + # + # The copyright notices for the file that contains the original code + # is below: + # + # Portions Copyright (C) 2010-2017 SchedMD LLC . + # Copyright (C) 2002-2007 The Regents of the University of California. + # Copyright (C) 2008-2010 Lawrence Livermore National Security. + # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + # Written by Morris Jette et. al. + # CODE-OCEC-09-009. All rights reserved. + # + # Slurm is licensed under the GNU General Public License. For the full + # text of Slurm's License, please see here: + # pyslurm/slurm/SLURM_LICENSE + # + # Please, as mentioned above, also have a look at Slurm's DISCLAIMER + # under pyslurm/slurm/SLURM_DISCLAIMER + # + # TODO: Explain the structure of the return value a bit more. + cdef: + slurm.job_resources *resources = self.ptr.job_resrcs + slurm.hostlist_t hl + uint32_t rel_node_inx + int bit_inx = 0 + int bit_reps = 0 + int sock_inx = 0 + uint32_t sock_reps = 0 + int i = 0, j + uint32_t k = 0 + char *host + char *gres = NULL + slurm.bitstr_t *cpu_bitmap + char cpu_bitmap_str[128] + uint32_t threads + dict output = {} + + if not resources or not resources.core_bitmap: + return output + + hl = slurm.slurm_hostlist_create(resources.nodes) + if not hl: + raise ValueError("Unable to create hostlist.") + + for rel_node_inx in range(resources.nhosts): + # Check how many consecutive nodes have the same cpu allocation + # layout. 
+ if sock_reps >= resources.sock_core_rep_count[sock_inx]: + sock_inx += 1 + sock_reps = 0 + sock_reps += 1 + + # Get the next node from the list of nodenames + host = slurm.slurm_hostlist_shift(hl) + + # How many rounds we have to do in order to calculate the complete + # cpu bitmap. + bit_reps = (resources.sockets_per_node[sock_inx] + * resources.cores_per_socket[sock_inx]) + + # Calculate the amount of threads per core this job has on the + # specific host. + threads = _threads_per_core(host) + + # Allocate a new, big enough cpu bitmap + cpu_bitmap = slurm.slurm_bit_alloc(bit_reps * threads) + + # Calculate the cpu bitmap for this host. + for j in range(bit_reps): + if slurm.slurm_bit_test(resources.core_bitmap, bit_inx): + for k in range(threads): + slurm.slurm_bit_set(cpu_bitmap, (j*threads)+k) + bit_inx += 1 + + # Extract the cpu bitmap into a char *cpu_bitmap_str + slurm.slurm_bit_fmt(cpu_bitmap_str, + sizeof(cpu_bitmap_str), cpu_bitmap) + slurm.slurm_bit_free(&cpu_bitmap) + + nodename = cstr.to_unicode(host) + cpu_ids = cstr.to_unicode(cpu_bitmap_str) + mem = None + + if rel_node_inx < self.ptr.gres_detail_cnt: + gres = self.ptr.gres_detail_str[rel_node_inx] + + if resources.memory_allocated: + mem = u64_parse(resources.memory_allocated[rel_node_inx]) + + if nodename: + output[nodename] = { + "cpu_ids": cpu_ids, + "gres": cstr.to_gres_dict(gres), + "memory": mem, + } + + free(host) + + slurm.slurm_hostlist_destroy(hl) + return output + + +# https://github.com/SchedMD/slurm/blob/d525b6872a106d32916b33a8738f12510ec7cf04/src/api/job_info.c#L99 +cdef _threads_per_core(char *host): + # TODO + return 1 diff --git a/pyslurm/core/job/sbatch_opts.pyx b/pyslurm/core/job/sbatch_opts.pyx new file mode 100644 index 00000000..91724d29 --- /dev/null +++ b/pyslurm/core/job/sbatch_opts.pyx @@ -0,0 +1,204 @@ +######################################################################### +# sbatch_opt.pyx - utilities to parse #SBATCH options 
#########################################################################
# sbatch_opts.pyx - utilities to parse #SBATCH options
#########################################################################
# Copyright (C) 2023 Toni Harzendorf
#
# This file is part of PySlurm
#
# PySlurm is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# PySlurm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with PySlurm; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# cython: c_string_type=unicode, c_string_encoding=default
# cython: language_level=3

import re
from pathlib import Path

SBATCH_MAGIC = "#SBATCH"


class _SbatchOpt():
    """One #SBATCH option and how it maps onto a submit description.

    short_opt/long_opt are the sbatch spellings (without dashes);
    our_attr_name is the attribute set on the submit description, or
    None when the option is recognized but unsupported. attr_param is
    the value to assign when the option carries no explicit value
    (e.g. boolean flags like --requeue).
    """
    def __init__(self, short_opt, long_opt,
                 our_attr_name, attr_param=None, is_boolean=False,
                 has_optional_args=False):
        self.short_opt = short_opt
        self.long_opt = long_opt
        self.our_attr_name = our_attr_name
        self.attr_param = attr_param
        self.is_boolean = is_boolean
        self.has_optional_args = has_optional_args


# Sorted by occurrence in the sbatch manpage - keep in order.
SBATCH_OPTIONS = [
    _SbatchOpt("A", "account", "account"),
    _SbatchOpt(None, "acctg-freq", "accounting_gather_frequency"),
    _SbatchOpt("a", "array", "array"),
    _SbatchOpt(None, "batch", "batch_constraints"),
    _SbatchOpt(None, "bb", "burst_buffer"),
    _SbatchOpt(None, "bbf", "burst_buffer_file"),
    _SbatchOpt("b", "begin", "begin_time"),
    _SbatchOpt("D", "chdir", "working_directory"),
    _SbatchOpt(None, "cluster-constraint", "cluster_constraints"),
    _SbatchOpt("M", "clusters", "clusters"),
    _SbatchOpt(None, "comment", "comment"),
    _SbatchOpt("C", "constraint", "constraints"),
    _SbatchOpt(None, "container", "container"),
    _SbatchOpt(None, "contiguous", "requires_contiguous_nodes"),
    _SbatchOpt("S", "core-spec", "cores_reserved_for_system"),
    _SbatchOpt(None, "cores-per-socket", "cores_per_socket"),
    _SbatchOpt(None, "cpu-freq", "cpu_frequency"),
    _SbatchOpt(None, "cpus-per-gpu", "cpus_per_gpu"),
    _SbatchOpt("c", "cpus-per-task", "cpus_per_task"),
    _SbatchOpt(None, "deadline", "deadline"),
    _SbatchOpt(None, "delay-boot", "delay_boot_time"),
    _SbatchOpt("d", "dependency", "dependencies"),
    _SbatchOpt("m", "distribution", "distribution"),
    _SbatchOpt("e", "error", "standard_error"),
    _SbatchOpt("x", "exclude", "excluded_nodes"),
    _SbatchOpt(None, "exclusive", "resource_sharing", "no"),
    _SbatchOpt(None, "export", "environment"),
    _SbatchOpt(None, "export-file", None),
    _SbatchOpt("B", "extra-node-info", None),
    _SbatchOpt(None, "get-user-env", "get_user_environment"),
    _SbatchOpt(None, "gid", "group_id"),
    _SbatchOpt(None, "gpu-bind", "gpu_binding"),
    _SbatchOpt(None, "gpu-freq", None),
    _SbatchOpt("G", "gpus", "gpus"),
    _SbatchOpt(None, "gpus-per-node", "gpus_per_node"),
    _SbatchOpt(None, "gpus-per-socket", "gpus_per_socket"),
    # Fixed: the long option was duplicated as "gpus-per-socket", so
    # "--gpus-per-task" was never recognized.
    _SbatchOpt(None, "gpus-per-task", "gpus_per_task"),
    _SbatchOpt(None, "gres", "gres_per_node"),
    _SbatchOpt(None, "gres-flags", "gres_binding"),
    _SbatchOpt(None, "hint", None),
    _SbatchOpt("H", "hold", "priority", 0),
    _SbatchOpt(None, "ignore-pbs", None),
    _SbatchOpt("i", "input", "standard_in"),
    _SbatchOpt("J", "job-name", "name"),
    _SbatchOpt(None, "kill-on-invalid-dep", "kill_on_invalid_dependency"),
    _SbatchOpt("L", "licenses", "licenses"),
    _SbatchOpt(None, "mail-type", "mail_types"),
    _SbatchOpt(None, "mail-user", "mail_user"),
    _SbatchOpt(None, "mcs-label", "mcs_label"),
    _SbatchOpt(None, "mem", "memory_per_node"),
    _SbatchOpt(None, "mem-bind", None),
    _SbatchOpt(None, "mem-per-cpu", "memory_per_cpu"),
    _SbatchOpt(None, "mem-per-gpu", "memory_per_gpu"),
    _SbatchOpt(None, "mincpus", "min_cpus_per_node"),
    _SbatchOpt(None, "network", "network"),
    _SbatchOpt(None, "nice", "nice"),
    _SbatchOpt("k", "no-kill", "kill_on_node_fail", False),
    _SbatchOpt(None, "no-requeue", "is_requeueable", False),
    _SbatchOpt("F", "nodefile", None),
    _SbatchOpt("w", "nodelist", "required_nodes"),
    _SbatchOpt("N", "nodes", "nodes"),
    _SbatchOpt("n", "ntasks", "ntasks"),
    _SbatchOpt(None, "ntasks-per-core", "ntasks_per_core"),
    _SbatchOpt(None, "ntasks-per-gpu", "ntasks_per_gpu"),
    _SbatchOpt(None, "ntasks-per-node", "ntasks_per_node"),
    _SbatchOpt(None, "ntasks-per-socket", "ntasks_per_socket"),
    _SbatchOpt(None, "open-mode", "log_files_open_mode"),
    _SbatchOpt("o", "output", "standard_output"),
    _SbatchOpt("O", "overcommit", "overcommit", True),
    _SbatchOpt("s", "oversubscribe", "resource_sharing", "yes"),
    _SbatchOpt("p", "partition", "partition"),
    _SbatchOpt(None, "power", "power_options"),
    _SbatchOpt(None, "prefer", None),
    _SbatchOpt(None, "priority", "priority"),
    _SbatchOpt(None, "profile", "profile_types"),
    _SbatchOpt(None, "propagate", None),
    _SbatchOpt("q", "qos", "qos"),
    _SbatchOpt(None, "reboot", "requires_node_reboot", True),
    _SbatchOpt(None, "requeue", "is_requeueable", True),
    _SbatchOpt(None, "reservation", "reservations"),
    _SbatchOpt(None, "signal", "signal"),
    _SbatchOpt(None, "sockets-per-node", "sockets_per_node"),
    _SbatchOpt(None, "spread-job", "spreads_over_nodes", True),
    _SbatchOpt(None, "switches", "switches"),
    _SbatchOpt(None, "thread-spec", "threads_reserved_for_system"),
    _SbatchOpt(None, "threads-per-core", "threads_per_core"),
    _SbatchOpt("t", "time", "time_limit"),
    _SbatchOpt(None, "time-min", "time_limit_min"),
    _SbatchOpt(None, "tmp", "temporary_disk_per_node"),
    _SbatchOpt(None, "uid", "user_id"),
    _SbatchOpt(None, "use-min-nodes", "use_min_nodes", True),
    _SbatchOpt(None, "wait-all-nodes", "wait_all_nodes", True),
    _SbatchOpt(None, "wckey", "wckey"),
]


def _parse_line(line):
    """Split one "#SBATCH ..." line into (option_name, value).

    Handles the forms "-t20", "-t 20", "--time=20", "--time 20" and bare
    boolean flags like "--exclusive". Leading dashes are stripped from
    the returned option name; value is None for flags.
    """
    # Remove the #SBATCH from the start
    opts = line[len("#SBATCH"):]

    # Ignore possible comments after the options
    opts = opts.split("#")[0].strip()

    # Now the line can be in these forms for example:
    # * -t20 or -t 20
    # * --time=20 or --time 20 or --time20
    if "=" in opts:
        # -t=21 or --time=20
        opts = "=".join(opts.replace("=", " ").split())
        opt, val = opts.split("=")
    elif " " in opts:
        # --time 20 or -t 20
        opts = "=".join(opts.split())
        opt, val = opts.split("=")
    elif any(el.isdigit() for el in opts):
        # -t20 or --time20 (option and numeric value fused together)
        opt, val = list(filter(None, re.split(r'(\d+)', opts)))
    else:
        # Probably a boolean flag, like --exclusive or -O
        opt, val = opts, None

    # Remove "-" or "--" at the front.
    opt = opt[1:]
    if opt[0] == "-":
        # Found second dash.
        opt = opt[1:]

    return opt, val


def _find_opt(opt):
    """Return the _SbatchOpt matching a short or long option name, or None."""
    for sbopt in SBATCH_OPTIONS:
        # Check if we can find the option in our predefined mapping.
        if opt == sbopt.short_opt or opt == sbopt.long_opt:
            return sbopt

    return None


def _parse_opts_from_batch_script(desc, script, overwrite):
    """Apply #SBATCH options found in a batch script file to *desc*.

    Attributes already set on *desc* are kept unless *overwrite* is
    truthy. Unknown or unsupported options are silently skipped; a
    missing/invalid script path is a no-op.
    """
    if not script or not Path(script).is_file():
        return None

    script = Path(script).read_text()
    for line in script.splitlines():
        line = line.lstrip()

        if line.startswith(SBATCH_MAGIC):
            flag, val = _parse_line(line)
            opt = _find_opt(flag)

            if not opt or opt.our_attr_name is None:
                # Not supported
                continue

            if getattr(desc, opt.our_attr_name) is None or overwrite:
                # Boolean flags carry their value in attr_param.
                val = opt.attr_param if val is None else val
                setattr(desc, opt.our_attr_name, val)
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t +from .job cimport Job + +from pyslurm cimport slurm +from pyslurm.slurm cimport ( + job_step_info_t, + slurm_get_job_steps, + job_step_info_response_msg_t, + step_update_request_msg_t, + slurm_free_job_step_info_response_msg, + slurm_init_update_step_msg, + slurm_free_update_step_msg, + slurm_free_job_step_info_response_msg, + slurm_free_job_step_info_members, + slurm_update_step, + slurm_signal_job_step, + slurm_kill_job_step, + slurm_job_state_string, + xfree, + try_xmalloc, +) + +cdef class JobSteps(dict): + """A collection of :obj:`JobStep` objects for a given Job. + + Args: + job (Union[Job, int]): + A Job for which the Steps should be loaded. + + Raises: + RPCError: When getting the Job steps from the slurmctld failed. + MemoryError: If malloc fails to allocate memory. + """ + + cdef: + job_step_info_response_msg_t *info + job_step_info_t tmp_info + + @staticmethod + cdef JobSteps _load(Job job) + + cdef dict _get_info(self, uint32_t job_id, int flags) + + +cdef class JobStep: + """A Slurm Jobstep + + Args: + job (Union[Job, int]): + The Job this Step belongs to. + step (Union[int, str]): + Step-ID for this JobStep object. + + Raises: + MemoryError: If malloc fails to allocate memory. + + Attributes: + id (Union[str, int]): + The id for this step. + job_id (int): + The id for the Job this step belongs to. + name (str): + Name of the step. + user_id (int): + User ID who owns this step. + user_name (str): + Name of the User who owns this step. + time_limit (int): + Time limit in Minutes for this step. + network (str): + Network specification for the step. + cpu_frequency_min (Union[str, int]): + Minimum CPU-Frequency requested. + cpu_frequency_max (Union[str, int]): + Maximum CPU-Frequency requested. + cpu_frequency_governor (Union[str, int]): + CPU-Frequency Governor requested. 
+ reserved_ports (str): + Reserved ports for the step. + cluster (str): + Name of the cluster this step runs on. + srun_host (str): + Name of the host srun was executed on. + srun_process_id (int): + Process ID of the srun command. + container (str): + Path to the container OCI. + allocated_nodes (str): + Nodes the Job is using. + start_time (int): + Time this step started, as unix timestamp. + run_time (int): + Seconds this step has been running for. + partition (str): + Name of the partition this step runs in. + state (str): + State the step is in. + allocated_cpus (int): + Number of CPUs this step uses in total. + ntasks (int): + Number of tasks this step uses. + distribution (dict): + Task distribution specification for the step. + command (str): + Command that was specified with srun. + slurm_protocol_version (int): + Slurm protocol version in use. + """ + + cdef: + job_step_info_t *ptr + step_update_request_msg_t *umsg + + @staticmethod + cdef JobStep from_ptr(job_step_info_t *in_ptr) diff --git a/pyslurm/core/job/step.pyx b/pyslurm/core/job/step.pyx new file mode 100644 index 00000000..d84330b1 --- /dev/null +++ b/pyslurm/core/job/step.pyx @@ -0,0 +1,463 @@ +######################################################################### +# job/step.pyx - interface to retrieve slurm job step informations +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from libc.string cimport memcpy, memset +from pyslurm.core.common cimport cstr, ctime +from pyslurm.core.common import cstr, ctime +from pyslurm.core.common.uint cimport * +from pyslurm.core.common.uint import * +from pyslurm.core.common.ctime cimport time_t +from pyslurm.core.error import RPCError, verify_rpc +from pyslurm.core.common import ( + signal_to_num, + instance_to_dict, + uid_to_name, +) +from pyslurm.core.job.util import cpu_freq_int_to_str +from pyslurm.core.job.task_dist cimport TaskDistribution + +from pyslurm.core.common.ctime import ( + secs_to_timestr, + mins_to_timestr, + timestr_to_mins, + timestamp_to_date, + _raw_time, +) + + +cdef class JobSteps(dict): + + def __dealloc__(self): + slurm_free_job_step_info_response_msg(self.info) + + def __cinit__(self): + self.info = NULL + + def __init__(self): + pass + + @staticmethod + def load(job): + cdef Job _job + _job = Job.load(job.id) if isinstance(job, Job) else Job.load(job) + return JobSteps._load(_job) + + @staticmethod + cdef JobSteps _load(Job job): + cdef JobSteps steps = JobSteps.__new__(JobSteps) + + step_info = steps._get_info(job.id, slurm.SHOW_ALL) + if not step_info and not slurm.IS_JOB_PENDING(job.ptr): + msg = f"Failed to load step info for Job {job.id}." + raise RPCError(msg=msg) + + # No super().__init__() needed? Cython probably already initialized + # the dict automatically. 
+ steps.update(step_info[job.id]) + return steps + + cdef dict _get_info(self, uint32_t job_id, int flags): + cdef: + JobStep step + JobSteps steps + uint32_t cnt = 0 + dict out = {} + + rc = slurm_get_job_steps(0, job_id, slurm.NO_VAL, &self.info, + flags) + verify_rpc(rc) + + # zero-out a dummy job_step_info_t + memset(&self.tmp_info, 0, sizeof(job_step_info_t)) + + # Put each job-step pointer into its own "JobStep" instance. + for cnt in range(self.info.job_step_count): + step = JobStep.from_ptr(&self.info.job_steps[cnt]) + + # Prevent double free if xmalloc fails mid-loop and a MemoryError + # is raised by replacing it with a zeroed-out job_step_info_t. + self.info.job_steps[cnt] = self.tmp_info + + if not step.job_id in out: + steps = JobSteps.__new__(JobSteps) + out[step.job_id] = steps + + out[step.job_id].update({step.id: step}) + + # At this point we memcpy'd all the memory for the Steps. Setting this + # to 0 will prevent the slurm step free function to deallocate the + # memory for the individual steps. This should be fine, because they + # are free'd automatically in __dealloc__ since the lifetime of each + # step-pointer is tied to the lifetime of its corresponding JobStep + # instance. + self.info.job_step_count = 0 + + return out + + @staticmethod + def load_all(): + """Loads all the steps in the system. + + Returns: + (dict): A dict where every JobID (key) is mapped with an instance + of its JobSteps (value). 
+ """ + cdef JobSteps steps = JobSteps.__new__(JobSteps) + return steps._get_info(slurm.NO_VAL, slurm.SHOW_ALL) + + +cdef class JobStep: + + def __cinit__(self): + self.ptr = NULL + self.umsg = NULL + + def __init__(self, job_id=0, step_id=0, **kwargs): + self._alloc_impl() + self.job_id = job_id.id if isinstance(job_id, Job) else job_id + self.id = step_id + + # Initialize attributes, if any were provided + for k, v in kwargs.items(): + setattr(self, k, v) + + def _alloc_info(self): + if not self.ptr: + self.ptr = try_xmalloc( + sizeof(job_step_info_t)) + if not self.ptr: + raise MemoryError("xmalloc failed for job_step_info_t") + + def _alloc_umsg(self): + if not self.umsg: + self.umsg = try_xmalloc( + sizeof(step_update_request_msg_t)) + if not self.ptr: + raise MemoryError("xmalloc failed for " + "step_update_request_msg_t") + slurm_init_update_step_msg(self.umsg) + + def _alloc_impl(self): + self._alloc_info() + self._alloc_umsg() + + def __dealloc__(self): + self._dealloc_impl() + + def _dealloc_impl(self): + slurm_free_job_step_info_members(self.ptr) + xfree(self.ptr) + slurm_free_update_step_msg(self.umsg) + self.umsg = NULL + + def __setattr__(self, name, val): + # When a user wants to set attributes on a instance that was created + # by calling JobSteps.load(), the "umsg" pointer is not yet allocated. + # We only allocate memory for it by the time the user actually wants + # to modify something. + self._alloc_umsg() + # Call descriptors __set__ directly + JobStep.__dict__[name].__set__(self, val) + + @staticmethod + def load(job_id, step_id): + """Load information for a specific job step. + + Implements the slurm_get_job_steps RPC. + + Args: + job_id (Union[Job, int]): + ID of the Job the Step belongs to. + step_id (Union[int, str]): + Step-ID for the Step to be loaded. + + Returns: + (pyslurm.JobStep): Returns a new JobStep instance + + Raises: + RPCError: When retrieving Step information from the slurmctld was + not successful. 
+ MemoryError: If malloc failed to allocate memory. + + Examples: + >>> import pyslurm + >>> jobstep = pyslurm.JobStep.load(9999, 1) + """ + cdef: + job_step_info_response_msg_t *info = NULL + JobStep wrap = JobStep.__new__(JobStep) + + job_id = job_id.id if isinstance(job_id, Job) else job_id + rc = slurm_get_job_steps(0, job_id, dehumanize_step_id(step_id), + &info, slurm.SHOW_ALL) + verify_rpc(rc) + + if info and info.job_step_count == 1: + # Copy new info + wrap._alloc_impl() + memcpy(wrap.ptr, &info.job_steps[0], sizeof(job_step_info_t)) + info.job_step_count = 0 + slurm_free_job_step_info_response_msg(info) + else: + slurm_free_job_step_info_response_msg(info) + msg = f"Step {step_id} of Job {job_id} not found." + raise RPCError(msg=msg) + + return wrap + + @staticmethod + cdef JobStep from_ptr(job_step_info_t *in_ptr): + cdef JobStep wrap = JobStep.__new__(JobStep) + wrap._alloc_info() + memcpy(wrap.ptr, in_ptr, sizeof(job_step_info_t)) + return wrap + + def send_signal(self, signal): + """Send a signal to a running Job step. + + Implements the slurm_signal_job_step RPC. + + Args: + signal (Union[str, int]): + Any valid signal which will be sent to the Job. Can be either + a str like 'SIGUSR1', or simply an int. + + Raises: + RPCError: When sending the signal was not successful. + + Examples: + Specifying the signal as a string: + + >>> from pyslurm import JobStep + >>> JobStep(9999, 1).send_signal("SIGUSR1") + + or passing in a numeric signal: + + >>> JobStep(9999, 1).send_signal(9) + """ + step_id = self.ptr.step_id.step_id + sig = signal_to_num(signal) + verify_rpc(slurm_signal_job_step(self.job_id, step_id, sig)) + + def cancel(self): + """Cancel a Job step. + + Implements the slurm_kill_job_step RPC. + + Raises: + RPCError: When cancelling the Job was not successful. 
+ + Examples: + >>> from pyslurm import JobStep + >>> JobStep(9999, 1).cancel() + """ + step_id = self.ptr.step_id.step_id + verify_rpc(slurm_kill_job_step(self.job_id, step_id, 9)) + + def modify(self, step=None, **kwargs): + """Modify a job step. + + Implements the slurm_update_step RPC. + + Args: + step (JobStep): + Another JobStep object which contains all the changes that + should be applied to this instance. + **kwargs: + You can also specify all the changes as keyword arguments. + Allowed values are only attributes which can actually be set + on a JobStep instance. If a step is explicitly specified as + parameter, all **kwargs will be ignored. + + Raises: + RPCError: When updating the JobStep was not successful. + + Examples: + >>> from pyslurm import JobStep + >>> + >>> # Setting the new time-limit to 20 days + >>> changes = JobStep(time_limit="20-00:00:00") + >>> JobStep(9999, 1).modify(changes) + >>> + >>> # Or by specifying the changes directly to the modify function + >>> JobStep(9999, 1).modify(time_limit="20-00:00:00") + """ + cdef JobStep js = self + + # Allow the user to both specify changes via object and **kwargs. + if step and isinstance(step, JobStep): + js = step + elif kwargs: + js = JobStep(**kwargs) + + js._alloc_umsg() + js.umsg.step_id = self.ptr.step_id.step_id + js.umsg.job_id = self.ptr.step_id.job_id + verify_rpc(slurm_update_step(js.umsg)) + + + def as_dict(self): + """JobStep information formatted as a dictionary. 
+ + Returns: + (dict): JobStep information as dict + """ + return instance_to_dict(self) + + @property + def id(self): + return humanize_step_id(self.ptr.step_id.step_id) + + @id.setter + def id(self, val): + self.ptr.step_id.step_id = dehumanize_step_id(val) + + @property + def job_id(self): + return self.ptr.step_id.job_id + + @job_id.setter + def job_id(self, val): + self.ptr.step_id.job_id = int(val) + + @property + def name(self): + return cstr.to_unicode(self.ptr.name) + + @property + def user_id(self): + return u32_parse(self.ptr.user_id, zero_is_noval=False) + + @property + def user_name(self): + return uid_to_name(self.ptr.user_id) + + @property + def time_limit(self): + return _raw_time(self.ptr.time_limit) + + @time_limit.setter + def time_limit(self, val): + self.umsg.time_limit=self.ptr.time_limit = timestr_to_mins(val) + + @property + def network(self): + return cstr.to_unicode(self.ptr.network) + + @property + def cpu_frequency_min(self): + return cpu_freq_int_to_str(self.ptr.cpu_freq_min) + + @property + def cpu_frequency_max(self): + return cpu_freq_int_to_str(self.ptr.cpu_freq_max) + + @property + def cpu_frequency_governor(self): + return cpu_freq_int_to_str(self.ptr.cpu_freq_gov) + + @property + def reserved_ports(self): + return cstr.to_unicode(self.ptr.resv_ports) + + @property + def cluster(self): + return cstr.to_unicode(self.ptr.cluster) + + @property + def srun_host(self): + return cstr.to_unicode(self.ptr.srun_host) + + @property + def srun_process_id(self): + return u32_parse(self.ptr.srun_pid) + + @property + def container(self): + return cstr.to_unicode(self.ptr.container) + + @property + def allocated_nodes(self): + return cstr.to_list(self.ptr.nodes) + + @property + def start_time(self): + return _raw_time(self.ptr.start_time) + + @property + def run_time(self): + return _raw_time(self.ptr.run_time) + + @property + def partition(self): + return cstr.to_unicode(self.ptr.partition) + + @property + def state(self): + return 
cstr.to_unicode(slurm_job_state_string(self.ptr.state)) + + @property + def alloc_cpus(self): + return u32_parse(self.ptr.num_cpus) + + @property + def ntasks(self): + return u32_parse(self.ptr.num_tasks) + + @property + def distribution(self): + return TaskDistribution.from_int(self.ptr.task_dist) + + @property + def command(self): + return cstr.to_unicode(self.ptr.submit_line) + + @property + def slurm_protocol_version(self): + return u32_parse(self.ptr.start_protocol_ver) + + +def humanize_step_id(sid): + if sid == slurm.SLURM_BATCH_SCRIPT: + return "batch" + elif sid == slurm.SLURM_EXTERN_CONT: + return "extern" + elif sid == slurm.SLURM_INTERACTIVE_STEP: + return "interactive" + elif sid == slurm.SLURM_PENDING_STEP: + return "pending" + else: + return sid + +def dehumanize_step_id(sid): + if sid == "batch": + return slurm.SLURM_BATCH_SCRIPT + elif sid == "extern": + return slurm.SLURM_EXTERN_CONT + elif sid == "interactive": + return slurm.SLURM_INTERACTIVE_STEP + elif sid == "pending": + return slurm.SLURM_PENDING_STEP + else: + return int(sid) diff --git a/pyslurm/core/job/submission.pxd b/pyslurm/core/job/submission.pxd new file mode 100644 index 00000000..ebf0b0c5 --- /dev/null +++ b/pyslurm/core/job/submission.pxd @@ -0,0 +1,619 @@ +######################################################################### +# submission.pxd - interface for submitting slurm jobs +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
#########################################################################
# submission.pxd - interface for submitting slurm jobs
#########################################################################
# Copyright (C) 2023 Toni Harzendorf
#
# This file is part of PySlurm
#
# PySlurm is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# PySlurm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with PySlurm; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# cython: c_string_type=unicode, c_string_encoding=default
# cython: language_level=3

from pyslurm cimport slurm
from pyslurm.slurm cimport (
    job_desc_msg_t,
    slurm_init_job_desc_msg,
    slurm_free_job_desc_msg,
    submit_response_msg_t,
    slurm_submit_batch_job,
    slurm_free_submit_response_response_msg,
    slurm_env_array_free,
    slurm_env_array_create,
    slurm_env_array_merge,
    slurm_env_array_overwrite,
    slurm_job_share_string,
    xfree,
    try_xmalloc,
)


cdef class JobSubmitDescription:
    """Description of a Slurm Job.

    Attributes:
        name (str):
            Name of the Job, same as -J/--job-name from sbatch.
        account (str):
            Account of the job, same as -A/--account from sbatch.
        user_id (Union[str, int]):
            Run the job as a different User, same as --uid from sbatch.
            This requires root privileges.
            You can both specify the name or numeric uid of the User.
        group_id (Union[str, int]):
            Run the job as a different Group, same as --gid from sbatch.
            This requires root privileges.
            You can both specify the name or numeric gid of the User.
        priority (int):
            Specific priority the Job will receive.
            Same as --priority from sbatch.
            You can achieve the behaviour of sbatch's --hold option by
            specifying a priority of 0.
        site_factor (int):
            Site Factor of the Job. Only used when updating an existing Job.
        wckey (str):
            WCKey to use with the Job, same as --wckey from sbatch.
        array (str):
            Job Array specification, same as -a/--array from sbatch.
        batch_constraints (str):
            Batch Features of a Job, same as --batch from sbatch.
        begin_time (str):
            Defer allocation until the specified time, same as --begin from
            sbatch.
        clusters (Union[list, str]):
            Clusters the job may run on, same as -M/--clusters from sbatch.
        cluster_constraints (str):
            Comma-separated str with cluster constraints for the job.
            This is the same as --cluster-constraint from sbatch.
        comment (str):
            Arbitrary job comment, same as --comment from sbatch.
        admin_comment (str):
            Arbitrary job admin comment.
            Only used when updating an existing job.
        requires_contiguous_nodes (bool):
            Whether allocated Nodes are required to form a contiguous set.
            Same as --contiguous from sbatch.
        cores_reserved_for_system (int):
            Count of cores reserved for system not usable by the Job.
            Same as -S/--core-spec from sbatch.
            Mutually exclusive with `threads_reserved_for_system`.
        threads_reserved_for_system (int):
            Count of threads reserved for system not usable by the Job.
            Same as --thread-spec from sbatch.
            Mutually exclusive with `cores_reserved_for_system`.
        working_directory (str):
            Work directory for the Job. Default is current work-dir from where
            the job was submitted.
            Same as -D/--chdir from sbatch.
        cpu_frequency (Union[dict, str]):
            CPU Frequency for the Job, same as --cpu-freq from sbatch.

            Examples:
                Specifying it as a dict:

                    cpu_frequency = {
                        "min": "Low",
                        "max": "High",
                        "governor": "UserSpace"
                    }

                or like in sbatch with a string. For more info on that, check
                out the sbatch documentation for --cpu-freq.

                If you only want to set a Governor without any min or max, you
                can simply specify it as a standalone string:

                    cpu_frequency = "Performance"
                    or
                    cpu_frequency = {"governor": "Performance"}

                If you want to set a specific, fixed frequency, you can do:

                    cpu_frequency = <frequency>
                    or either
                    cpu_frequency = {"max": <frequency>}
                    or
                    cpu_frequency = {"min": <frequency>}
        nodes (Union[dict, str, int]):
            Amount of nodes needed for the job.
            This is the same as -N/--nodes from sbatch.

            Examples:
                Providing min/max nodes as a dict:

                    nodes = {"min": 3, "max": 6}

                When no range is needed, you can also simply specify it as
                int:

                    nodes = 3

                Other than that, a range can also be specified in a str like
                with sbatch:

                    nodes = "1-5"
        deadline (str):
            Deadline specification for the Job, same as --deadline from
            sbatch.
        delay_boot_time (Union[str, int]):
            Delay boot specification for the Job, same as --delay-boot from
            sbatch.
        dependencies (Union[dict, str]):
            Dependencies for the Job, same as -d/--dependency from sbatch.
        excluded_nodes (Union[list, str]):
            Exclude specific nodes for this Job.
            This is the same as -x/--exclude from sbatch.
        required_nodes (Union[list, str]):
            Specific list of nodes required for the Job.
            This is the same as -w/--nodelist from sbatch.
        constraints (str):
            Required node features for the Job.
            This is the same as -C/--constraint from sbatch.
        kill_on_node_fail (bool):
            Should the job get killed if one of the Nodes fails?
            This is the same as -k/--no-kill from sbatch.
        licenses (Union[list, str]):
            A list of licenses for the Job.
            This is the same as -L/--licenses from sbatch.
        mail_user (Union[list, str]):
            List of email addresses for notifications.
            This is the same as --mail-user from sbatch.
        mail_types (Union[list, str]):
            List of mail flags.
            This is the same as --mail-type from sbatch.
        mcs_label (str):
            An MCS Label for the Job.
            This is the same as --mcs-label from sbatch.
        memory_per_cpu (Union[str, int]):
            Memory required per allocated CPU.

            The default unit is in Mebibytes. You are also able to specify
            unit suffixes like K|M|G|T.
            This is the same as --mem-per-cpu from sbatch. This is mutually
            exclusive with memory_per_node and memory_per_gpu.

            Examples:
                memory_per_cpu = 1024      # 1 GiB (1024 MiB)
                memory_per_cpu = "3G"      # 3 GiB
        memory_per_node (Union[str, int]):
            Memory required per whole node.

            The default unit is in Mebibytes. You are also able to specify
            unit suffixes like K|M|G|T.
            This is the same as --mem from sbatch. This is mutually exclusive
            with memory_per_cpu and memory_per_gpu.

            Examples:
                memory_per_node = 1024     # 1 GiB (1024 MiB)
                memory_per_node = "3G"     # 3 GiB
        memory_per_gpu (Union[str, int]):
            Memory required per GPU.

            The default unit is in Mebibytes. You are also able to specify
            unit suffixes like K|M|G|T.
            This is the same as --mem-per-gpu from sbatch. This is mutually
            exclusive with memory_per_node and memory_per_cpu.

            Examples:
                memory_per_gpu = 1024      # 1 GiB (1024 MiB)
                memory_per_gpu = "3G"      # 3 GiB
        network (str):
            Network types for the Job.
            This is the same as --network from sbatch.
        nice (int):
            Adjusted scheduling priority for the Job.
            This is the same as --nice from sbatch.
        log_files_open_mode (str):
            Mode in which standard_output and standard_error log files
            should be opened.

            Valid options are:
            * append
            * truncate

            This is the same as --open-mode from sbatch.
        overcommit (bool):
            If the resources should be overcommitted.
            This is the same as -O/--overcommit from sbatch.
        partitions (Union[list, str]):
            A list of partitions the Job may use.
            This is the same as -p/--partition from sbatch.
        power_options (list):
            A list of power management plugin options for the Job.
            This is the same as --power from sbatch.
        profile_types (Union[list, str]):
            Accounting gather profile types to be enabled for the Job.
            This is the same as --profile from sbatch.
        accounting_gather_frequency (Union[dict, str]):
            Interval for accounting info to be gathered.
            This is the same as --acctg-freq from sbatch.

            Examples:
                Specifying it as a dict:

                    accounting_gather_frequency = {
                        "energy": 60,
                        "network": 20,
                    }

                or as a single string:

                    accounting_gather_frequency = "energy=60,network=20"
        qos (str):
            Quality of Service for the Job.
            This is the same as -q/--qos from sbatch.
        requires_node_reboot (bool):
            Force the allocated nodes to reboot before the job starts.
            This is the same as --reboot from sbatch.
        is_requeueable (bool):
            If the Job is eligible for requeuing.
            This is the same as --requeue from sbatch.
        reservations (Union[list, str]):
            A list of possible reservations the Job can use.
            This is the same as --reservation from sbatch.
        script (str):
            Absolute Path or content of the batch script.

            You can specify either a path to a script which will be loaded,
            or you can pass the script as a string.
            If the script is passed as a string, providing arguments to it
            (see "script_args") is not supported.
        script_args (str):
            Arguments passed to the batch script.
            You can only set arguments if a file path was specified for
            "script".
        environment (Union[dict, str]):
            Environment variables to be set for the Job.
            This is the same as --export from sbatch.
        resource_sharing (str):
            Controls the resource sharing with other Jobs.

            This property combines functionality of --oversubscribe and
            --exclusive from sbatch.

            Allowed values are:

            * "oversubscribe" or "yes":
                The Job allows resources to be shared with other running
                Jobs.

            * "user":
                Only sharing resources with other Jobs that have the "user"
                option set is allowed.

            * "mcs":
                Only sharing resources with other Jobs that have the "mcs"
                option set is allowed.

            * "no" or "exclusive":
                No sharing of resources is allowed. (--exclusive from sbatch)
        distribution (Union[dict, str]):
            TODO
        time_limit (str):
            The time limit for the job.
            This is the same as -t/--time from sbatch.
        time_limit_min (str):
            A minimum time limit for the Job.
            This is the same as --time-min from sbatch.
        container (str):
            Path to an OCI container bundle.
            This is the same as --container from sbatch.
        cpus_per_task (int):
            The amount of cpus required for each task.

            This is the same as -c/--cpus-per-task from sbatch.
            This is mutually exclusive with cpus_per_gpu.
        cpus_per_gpu (int):
            The amount of cpus required for each allocated GPU.

            This is the same as --cpus-per-gpu from sbatch.
            This is mutually exclusive with cpus_per_task.
        sockets_per_node (int):
            Restrict Job to nodes with at least this many sockets.
            This is the same as --sockets-per-node from sbatch.
        cores_per_socket (int):
            Restrict Job to nodes with at least this many cores per socket.
            This is the same as --cores-per-socket from sbatch.
        threads_per_core (int):
            Restrict Job to nodes with at least this many threads per core.
            This is the same as --threads-per-core from sbatch.
        gpus (Union[dict, str, int]):
            GPUs for the Job to be allocated in total.

            This is the same as -G/--gpus from sbatch.
            Specifying the type of the GPU is optional.

            Examples:
                gpus = {"tesla": 1, "volta": 5}    # as dict
                gpus = "tesla:1,volta:5"           # as string
                gpus = 6                           # type-agnostic
        gpus_per_socket (Union[dict, str, int]):
            GPUs for the Job to be allocated per socket.

            This is the same as --gpus-per-socket from sbatch.

            Specifying the type of the GPU is optional. Note that setting
            gpus_per_socket requires to also specify sockets_per_node.

            Examples:
                gpus_per_socket = {"tesla": 1, "volta": 5}   # as dict
                gpus_per_socket = "tesla:1,volta:5"          # as string
                gpus_per_socket = 6                          # type-agnostic
        gpus_per_task (Union[dict, str, int]):
            GPUs for the Job to be allocated per task.

            This is the same as --gpus-per-task from sbatch.

            Specifying the type of the GPU is optional. Note that setting
            "gpus_per_task" requires to also specify either one of "ntasks"
            or "gpus".

            Examples:
                gpus_per_task = {"tesla": 1, "volta": 5}     # as dict
                gpus_per_task = "tesla:1,volta:5"            # as string
                gpus_per_task = 6                            # type-agnostic
        gres_per_node (Union[dict, str]):
            Generic resources to be allocated per node.

            This is the same as --gres from sbatch. You should also use this
            option if you want to specify GPUs per node (--gpus-per-node).
            Specifying the type (by separating GRES name and type with a
            semicolon) is optional.

            Examples:
                gres_per_node = {"gpu:tesla": 1, "gpu:volta": 5}  # as dict
                gres_per_node = "gpu:tesla:1,gpu:volta:5"         # as string
                gres_per_node = "gpu:6"      # GPU GRES without specific type
        gpu_binding (str):
            Specify GPU binding for the Job.
            This is the same as --gpu-bind from sbatch.
        ntasks (int):
            Maximum amount of tasks for the Job.
            This is the same as -n/--ntasks from sbatch.
        ntasks_per_node (int):
            Amount of tasks to be invoked on each node.
            This is the same as --ntasks-per-node from sbatch.
        ntasks_per_socket (int):
            Maximum amount of tasks to be invoked on each socket.
            This is the same as --ntasks-per-socket from sbatch.
        ntasks_per_core (int):
            Maximum amount of tasks to be invoked on each core.
            This is the same as --ntasks-per-core from sbatch.
        ntasks_per_gpu (int):
            Amount of tasks to be invoked per GPU.
            This is the same as --ntasks-per-gpu from sbatch.
        switches (Union[dict, str, int]):
            Maximum amount of leaf switches and wait time desired.

            This can also optionally include a maximum waiting time for
            these switches.
            This is the same as --switches from sbatch.

            Examples:
                Specifying it as a dict:

                    switches = {"count": 5, "max_wait_time": "00:10:00"}

                Or as a single string (sbatch-style):

                    switches = "5@00:10:00"
        signal (Union[dict, str]):
            Warn signal to be sent to the Job.

            This is the same as --signal from sbatch.
            The signal can both be specified with its name, e.g. "SIGKILL",
            or as a number, e.g. 9

            Examples:
                Specifying it as a dict:

                    signal = {
                        "signal": "SIGKILL",
                        "time": 120
                    }

                The above will send a "SIGKILL" signal 120 seconds before the
                Jobs' time limit is reached.

                Or, specifying it as a string (sbatch-style):

                    signal = "SIGKILL@120"
        standard_in (str):
            Path to a File acting as standard_in for the batch-script.
            This is the same as -i/--input from sbatch.
        standard_output (str):
            Path to a File to write the Jobs standard_output.
            This is the same as -o/--output from sbatch.
        standard_error (str):
            Path to a File to write the Jobs standard_error.
            This is the same as -e/--error from sbatch.
        kill_on_invalid_dependency (bool):
            Kill the job if it has an invalid dependency.
            This is the same as --kill-on-invalid-dep from sbatch.
        spreads_over_nodes (bool):
            Spread the Job over as many nodes as possible.
            This is the same as --spread-job from sbatch.
        use_min_nodes (bool):
            Prefer the minimum amount of nodes specified.
            This is the same as --use-min-nodes from sbatch.
        gres_binding (str):
            Generic resource task binding options.
            This is the --gres-flags option from sbatch.

            Possible values are:
            * "enforce-binding"
            * "disable-binding"
        temporary_disk_per_node (Union[str, int]):
            Amount of temporary disk space needed per node.

            This is the same as --tmp from sbatch. You can specify units
            like K|M|G|T (multiples of 1024).
            If no unit is specified, the value will be assumed as Mebibytes.

            Examples:
                temporary_disk_per_node = "2G"    # 2048 MiB
                temporary_disk_per_node = 1024    # 1024 MiB
        get_user_environment (Union[str, bool, int]):
            TODO
        min_cpus_per_node (str):
            Set the minimum amount of CPUs required per Node.
            This is the same as --mincpus from sbatch.
        wait_all_nodes (bool):
            Controls when the execution of the command begins.

            A value of True means that the Job should begin execution only
            after all nodes in the allocation are ready. Setting it to False,
            the default, means that it is not waited for the nodes to be
            ready. (i.e booted)
    """
    cdef:
        slurm.job_desc_msg_t *ptr
        is_update

    cdef public:
        name
        account
        user_id
        group_id
        priority
        site_factor
        wckey
        array
        batch_constraints
        begin_time
        clusters
        cluster_constraints
        comment
        admin_comment
        requires_contiguous_nodes
        cores_reserved_for_system
        threads_reserved_for_system
        working_directory
        cpu_frequency
        nodes
        deadline
        delay_boot_time
        dependencies
        excluded_nodes
        required_nodes
        constraints
        kill_on_node_fail
        licenses
        mail_user
        mail_types
        mcs_label
        memory_per_cpu
        memory_per_node
        memory_per_gpu
        network
        nice
        log_files_open_mode
        overcommit
        partitions
        power_options
        profile_types
        accounting_gather_frequency
        qos
        requires_node_reboot
        is_requeueable
        reservations
        script
        script_args
        environment
        resource_sharing
        distribution
        time_limit
        time_limit_min
        container
        cpus_per_task
        cpus_per_gpu
        sockets_per_node
        cores_per_socket
        threads_per_core
        gpus
        gpus_per_socket
        gpus_per_task
        gres_per_node
        gpu_binding
        ntasks
        ntasks_per_node
        ntasks_per_socket
        ntasks_per_core
        ntasks_per_gpu
        switches
        signal
        standard_in
        standard_output
        standard_error
        kill_on_invalid_dependency
        spreads_over_nodes
        use_min_nodes
        gres_binding
        temporary_disk_per_node
        get_user_environment
        min_cpus_per_node
        wait_all_nodes
#########################################################################
# submission.pyx - interface for submitting slurm jobs
#########################################################################
# Copyright (C) 2023 Toni Harzendorf
#
# This file is part of PySlurm
#
# PySlurm is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# PySlurm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with PySlurm; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# cython: c_string_type=unicode, c_string_encoding=default
# cython: language_level=3

from os import getcwd
from os import environ as pyenviron
import re
import typing
import shlex
from pathlib import Path
from pyslurm.core.common cimport cstr, ctime
from pyslurm.core.common import cstr
from pyslurm.core.common.uint cimport *
from pyslurm.core.common.uint import *
from pyslurm.core.common.ctime cimport time_t
from pyslurm.core.job.util import *
from pyslurm.core.error import RPCError, verify_rpc
from pyslurm.core.job.sbatch_opts import _parse_opts_from_batch_script
from pyslurm.core.common.ctime import (
    secs_to_timestr,
    timestr_to_secs,
    mins_to_timestr,
    timestr_to_mins,
    timestamp_to_date,
    date_to_timestamp,
)
from pyslurm.core.job.task_dist cimport TaskDistribution

from pyslurm.core.common import (
    humanize,
    dehumanize,
    signal_to_num,
    user_to_uid,
    group_to_gid,
    uid_to_name,
    gid_to_name,
)


cdef class JobSubmitDescription:
    def __cinit__(self):
        self.ptr = NULL

    def __init__(self, **kwargs):
        # Initialize explicitly provided attributes, if any.
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __dealloc__(self):
        slurm_free_job_desc_msg(self.ptr)

    def _alloc_and_init(self):
        # Throw away any previous message and start from a freshly
        # slurm-initialized one.
        slurm_free_job_desc_msg(self.ptr)

        self.ptr = <job_desc_msg_t*>try_xmalloc(sizeof(job_desc_msg_t))
        if not self.ptr:
            raise MemoryError("xmalloc for job_desc_msg_t failed.")

        slurm_init_job_desc_msg(self.ptr)

    def submit(self):
        """Submit a batch job description.

        Returns:
            (int): The ID of the submitted Job.

        Raises:
            RPCError: When the job submission was not successful.
            MemoryError: If malloc failed to allocate enough memory.

        Examples:
            >>> desc = JobSubmitDescription(
            >>>     name="test-job",
            >>>     cpus_per_task=1,
            >>>     time_limit="10-00:00:00")
            >>>
            >>> job_id = desc.submit()
        """
        cdef submit_response_msg_t *resp = NULL

        self._create_job_submit_desc()
        verify_rpc(slurm_submit_batch_job(self.ptr, &resp))

        job_id = resp.job_id
        slurm_free_submit_response_response_msg(resp)

        return job_id

    def load_environment(self, overwrite=False):
        """Load values of attributes provided through the environment.

        Args:
            overwrite (bool):
                If set to True, the value from an option found in the
                environment will override its current value. Default is False
        """
        self._parse_env(overwrite)

    def load_sbatch_options(self, overwrite=False):
        """Load values from #SBATCH options in the batch script.

        Args:
            overwrite (bool):
                If set to True, the value from an option found in the
                batch script will override its current value. Default is
                False
        """
        _parse_opts_from_batch_script(self, self.script, overwrite)

    def _parse_env(self, overwrite=False):
        # Pick up PYSLURM_JOBDESC_* environment variables for every
        # public, non-callable attribute.
        for attr in dir(self):
            # BUGFIX: "attr" is a string, so callable(attr) was always
            # False - check the actual attribute value instead, so that
            # methods like submit() are skipped.
            if attr.startswith("_") or callable(getattr(self, attr)):
                # Ignore everything starting with "_" and all functions.
                # Arguments directly specified upon object creation will
                # always have precedence.
                continue

            spec = attr.upper()
            # BUGFIX: stray ")" inside the f-string replacement field was
            # a syntax error: f"PYSLURM_JOBDESC_{spec)}"
            val = pyenviron.get(f"PYSLURM_JOBDESC_{spec}")
            if (val is not None
                    and (getattr(self, attr) is None or overwrite)):

                # Just convert literal true/false strings to bool.
                tmp = val.casefold()
                if tmp == "true":
                    val = True
                elif tmp == "false":
                    val = False

                setattr(self, attr, val)

    def _create_job_submit_desc(self, is_update=False):
        # Translate all attributes of this instance into the raw
        # job_desc_msg_t that the slurm API consumes.
        self.is_update = is_update
        self._alloc_and_init()
        cdef slurm.job_desc_msg_t *ptr = self.ptr

        if not self.is_update:
            self._validate_options()
            self._set_defaults()

        if self.nice:
            ptr.nice = slurm.NICE_OFFSET + int(self.nice)

        if self.site_factor:
            ptr.site_factor = slurm.NICE_OFFSET + int(self.site_factor)

        if self.user_id is not None:
            ptr.user_id = user_to_uid(self.user_id)
        if self.group_id is not None:
            ptr.group_id = group_to_gid(self.group_id)

        cstr.fmalloc(&ptr.name, self.name)
        cstr.fmalloc(&ptr.account, self.account)
        cstr.fmalloc(&ptr.wckey, self.wckey)
        cstr.fmalloc(&ptr.array_inx, self.array)
        cstr.fmalloc(&ptr.batch_features, self.batch_constraints)
        cstr.fmalloc(&ptr.cluster_features, self.cluster_constraints)
        cstr.fmalloc(&ptr.comment, self.comment)
        cstr.fmalloc(&ptr.work_dir, self.working_directory)
        cstr.fmalloc(&ptr.features, self.constraints)
        cstr.fmalloc(&ptr.mail_user, self.mail_user)
        cstr.fmalloc(&ptr.mcs_label, self.mcs_label)
        cstr.fmalloc(&ptr.network, self.network)
        cstr.fmalloc(&ptr.qos, self.qos)
        cstr.fmalloc(&ptr.container, self.container)
        cstr.fmalloc(&ptr.std_in, self.standard_in)
        cstr.fmalloc(&ptr.std_out, self.standard_output)
        cstr.fmalloc(&ptr.std_err, self.standard_error)
        cstr.fmalloc(&ptr.tres_per_job, cstr.from_gres_dict(self.gpus, "gpu"))
        cstr.fmalloc(&ptr.tres_per_socket,
                     cstr.from_gres_dict(self.gpus_per_socket, "gpu"))
        cstr.fmalloc(&ptr.tres_per_task,
                     cstr.from_gres_dict(self.gpus_per_task, "gpu"))
        cstr.fmalloc(&ptr.tres_per_node,
                     cstr.from_gres_dict(self.gres_per_node))
        cstr.fmalloc(&ptr.cpus_per_tres,
                     cstr.from_gres_dict(self.cpus_per_gpu, "gpu"))
        cstr.fmalloc(&ptr.admin_comment, self.admin_comment)

        cstr.from_list(&ptr.clusters, self.clusters)
        cstr.from_list(&ptr.exc_nodes, self.excluded_nodes)
        cstr.from_list(&ptr.req_nodes, self.required_nodes)
        cstr.from_list(&ptr.licenses, self.licenses)
        cstr.from_list(&ptr.partition, self.partitions)
        cstr.from_list(&ptr.reservation, self.reservations)
        cstr.from_dict(&ptr.acctg_freq, self.accounting_gather_frequency)

        ptr.deadline = date_to_timestamp(self.deadline)
        ptr.begin_time = date_to_timestamp(self.begin_time)
        ptr.delay_boot = timestr_to_secs(self.delay_boot_time)
        ptr.time_limit = timestr_to_mins(self.time_limit)
        ptr.time_min = timestr_to_mins(self.time_limit_min)

        ptr.priority = u32(self.priority, zero_is_noval=False)
        ptr.num_tasks = u32(self.ntasks)
        ptr.pn_min_tmp_disk = u32(dehumanize(self.temporary_disk_per_node))
        ptr.cpus_per_task = u16(self.cpus_per_task)
        ptr.sockets_per_node = u16(self.sockets_per_node)
        ptr.cores_per_socket = u16(self.cores_per_socket)
        ptr.ntasks_per_socket = u16(self.ntasks_per_socket)
        ptr.ntasks_per_tres = u16(self.ntasks_per_gpu)
        ptr.ntasks_per_node = u16(self.ntasks_per_node)
        ptr.threads_per_core = u16(self.threads_per_core)
        ptr.ntasks_per_core = u16(self.ntasks_per_core)
        u64_set_bool_flag(&ptr.bitflags, self.spreads_over_nodes,
                          slurm.SPREAD_JOB)
        u64_set_bool_flag(&ptr.bitflags, self.kill_on_invalid_dependency,
                          slurm.KILL_INV_DEP)
        u64_set_bool_flag(&ptr.bitflags, self.use_min_nodes,
                          slurm.USE_MIN_NODES)
        ptr.contiguous = u16_bool(self.requires_contiguous_nodes)
        ptr.kill_on_node_fail = u16_bool(self.kill_on_node_fail)
        ptr.overcommit = u8_bool(self.overcommit)
        ptr.reboot = u16_bool(self.requires_node_reboot)
        ptr.requeue = u16_bool(self.is_requeueable)
        ptr.wait_all_nodes = u16_bool(self.wait_all_nodes)

        ptr.mail_type = mail_type_list_to_int(self.mail_types)
        ptr.power_flags = power_type_list_to_int(self.power_options)
        ptr.profile = acctg_profile_list_to_int(self.profile_types)
        ptr.shared = shared_type_str_to_int(self.resource_sharing)

        self._set_cpu_frequency()
        self._set_nodes()
        self._set_dependencies()
        self._set_memory()
        self._set_open_mode()
        self._set_script()
        self._set_script_args()
        self._set_environment()
        self._set_distribution()
        self._set_gpu_binding()
        self._set_gres_binding()
        self._set_min_cpus()

        # TODO
        # burst_buffer
        # mem_bind, mem_bind_type?
        # gpu_freq
        # --hint
        # spank_env
        # --propagate for rlimits

    def _set_defaults(self):
        # Mirror sbatch's implicit defaults for a fresh submission.
        if not self.ntasks:
            self.ntasks = 1
        if not self.cpus_per_task:
            self.cpus_per_task = 1
        if not self.working_directory:
            self.working_directory = str(getcwd())
        if not self.environment:
            # By default, sbatch also exports everything in the users env.
            self.environment = "ALL"

    def _validate_options(self):
        # Reject option combinations that slurm itself would refuse.
        if not self.script:
            raise ValueError("You need to provide a batch script.")

        if (self.memory_per_node and self.memory_per_cpu
                or self.memory_per_gpu and self.memory_per_cpu
                or self.memory_per_node and self.memory_per_gpu):
            raise ValueError("Only one of memory_per_cpu, memory_per_node or "
                             "memory_per_gpu can be set.")

        if (self.ntasks_per_gpu and
                (self.ptr.min_nodes != u32(None) or self.nodes
                 or self.gpus_per_task or self.gpus_per_socket
                 or self.ntasks_per_node)):
            raise ValueError("ntasks_per_gpu is mutually exclusive with "
                             "nodes, gpus_per_task, gpus_per_socket and "
                             "ntasks_per_node.")

        if self.cpus_per_gpu and self.cpus_per_task:
            raise ValueError("cpus_per_task and cpus_per_gpu "
                             "are mutually exclusive.")

        if (self.cores_reserved_for_system
                and self.threads_reserved_for_system):
            raise ValueError("cores_reserved_for_system is mutually "
                             " exclusive with threads_reserved_for_system.")
u16(self.cores_reserved_for_system) + elif self.threads_reserved_for_system: + self.ptr.core_spec = u16(self.threads_reserved_for_system) + self.ptr.core_spec |= slurm.CORE_SPEC_THREAD + + def _set_cpu_frequency(self): + if not self.cpu_frequency: + return None + + freq = self.cpu_frequency + have_no_range = False + + # Alternatively support sbatch-like --cpu-freq setting. + if not isinstance(freq, dict): + freq_splitted = re.split("[-:]+", str(freq)) + freq_len = len(freq_splitted) + freq = {} + + # Transform cpu-freq string to the individual components. + if freq_splitted[0].isdigit(): + freq["max"] = freq_splitted[0] + else: + if freq_len > 1: + raise ValueError( + "Invalid cpu_frequency format: {kwargs}." + "Governor must be provided as single element or " + "as last element in the form of min-max:governor. " + ) + freq["governor"] = freq_splitted[0] + + if freq_len >= 2: + freq["min"] = freq["max"] + freq["max"] = freq_splitted[1] + + if freq_len == 3: + freq["governor"] = freq_splitted[2] + + freq_min = cpu_freq_str_to_int(freq.get("min")) + freq_max = cpu_freq_str_to_int(freq.get("max")) + freq_gov = cpu_gov_str_to_int(freq.get("governor")) + + if freq_min != u32(None): + if freq_max == u32(None): + freq_max = freq_min + freq_min = u32(None) + have_no_range = True + elif freq_max < freq_min: + raise ValueError( + f"min cpu-freq ({freq_min}) must be smaller " + f"than max cpu-freq ({freq_max})" + ) + elif freq_max != u32(None) and freq_min == u32(None): + have_no_range = True + + if have_no_range and freq_gov != u32(None): + raise ValueError( + "Setting Governor when specifying only either one " + "of min or max is not allowed." 
+ ) + + self.ptr.cpu_freq_min = freq_min + self.ptr.cpu_freq_max = freq_max + self.ptr.cpu_freq_gov = freq_gov + + def _set_nodes(self): + vals = self.nodes + nmin=nmax = 1 + + if self.is_update: + return None + + # Support input like --nodes from sbatch (min-[max]) + if isinstance(vals, dict): + nmin = u32(vals.get("min", 1), on_noval=1) + nmax = u32(vals.get("max", 1), on_noval=nmin) + elif vals is not None: + v = str(vals).split("-", 1) + nmin = int(v[0]) + if nmin == 0: + nmin = 1 + if "-" in str(vals): + nmax = int(v[1]) + else: + nmax = nmin + + if not nmax: + nmax = nmin + if nmax < nmin: + raise ValueError("Max Nodecount cannot be " + "less than minimum nodecount.") + + self.ptr.min_nodes = nmin + self.ptr.max_nodes = nmax + + def _set_dependencies(self): + val = self.dependencies + final = None + + if isinstance(val, str): + # TODO: Even though everything is checked in the slurmctld, maybe + # still do some sanity checks here on the input when a string + # is provided. + final = val + elif val is not None: + satisfy = val.pop("satisfy", "all").casefold() + + if satisfy == "any": + delim = "?" + else: + delim = "," + + final = [] + for k, v in val.items(): + if k == "singleton" and bool(v): + final.append("singleton") + continue + + if not isinstance(v, list): + raise TypeError(f"Values for {k} must be list, " + f"got {type(v)}.") + # Convert everything to strings and add it to the dependency + # list. 
+ v[:] = [str(s) for s in v] + final.append(f"{k}:{':'.join(v)}") + + final = delim.join(final) + + cstr.fmalloc(&self.ptr.dependency, final) + + def _set_memory(self): + if self.memory_per_cpu: + self.ptr.pn_min_memory = u64(dehumanize(self.memory_per_cpu)) + self.ptr.pn_min_memory |= slurm.MEM_PER_CPU + elif self.memory_per_node: + self.ptr.pn_min_memory = u64(dehumanize(self.memory_per_node)) + elif self.memory_per_gpu: + mem_gpu = u64(dehumanize(val)) + cstr.fmalloc(&self.ptr.mem_per_tres, f"gres:gpu:{mem_gpu}") + + def _set_open_mode(self): + val = self.log_files_open_mode + if val == "append": + self.ptr.open_mode = slurm.OPEN_MODE_APPEND + elif val == "truncate": + self.ptr.open_mode = slurm.OPEN_MODE_TRUNCATE + + def _set_script(self): + sfile = self.script + sbody = None + + if self.is_update: + return None + + if Path(sfile).is_file(): + # First assume the caller is passing a path to a script and we try + # to load it. + sbody = Path(sfile).read_text() + else: + # Otherwise assume that the script content is passed directly. + sbody = sfile + if self.script_args: + raise ValueError("Passing arguments to a script is only allowed " + "if it was loaded from a file.") + + # Validate the script + if not sbody or not len(sbody): + raise ValueError("Batch script is empty or none was provided.") + elif sbody.isspace(): + raise ValueError("Batch script contains only whitespace.") + elif not sbody.startswith("#!"): + msg = "Not a valid Batch script. " + msg += "First line must start with '#!'," + msg += "followed by the path to an interpreter" + raise ValueError(msg) + elif "\0" in sbody: + msg = "The Slurm Controller does not allow scripts that " + msg += "contain a NULL character: '\\0'." + raise ValueError(msg) + elif "\r\n" in sbody: + msg = "Batch script contains DOS line breaks (\\r\\n) " + msg += "instead of expected UNIX line breaks (\\n)." 
+ raise ValueError(msg) + + cstr.fmalloc(&self.ptr.script, sbody) + + def _set_script_args(self): + args = self.script_args + if not args: + return None + + if isinstance(args, str): + sargs = shlex.split(args) + else: + sargs = list(args) + + # Script should always first in argv. + if sargs[0] != self.script: + sargs.insert(0, self.script) + + self.ptr.argc = len(sargs) + self.ptr.argv = try_xmalloc(self.ptr.argc * sizeof(char*)) + if not self.ptr.argv: + raise MemoryError("xmalloc failed for script_args") + + for idx, opt in enumerate(sargs): + cstr.fmalloc(&self.ptr.argv[idx], opt) + + def _set_environment(self): + if self.is_update: + return None + + vals = self.environment + get_user_env = self.get_user_environment + + # Clear any previous environment set for the Job. + slurm_env_array_free(self.ptr.environment) + self.ptr.env_size = 0 + + # Allocate a new environment. + self.ptr.environment = slurm_env_array_create() + + if isinstance(vals, str) or vals is None: + if vals is None or vals.casefold() == "all": + # This is the default. Export all current environment + # variables into the Job. + slurm_env_array_merge(&self.ptr.environment, + slurm.environ) + elif vals.casefold() == "none": + # Only env variables starting with "SLURM_" will be exported. + for var, val in pyenviron.items(): + if var.startswith("SLURM_"): + slurm_env_array_overwrite(&self.ptr.environment, + var, str(val)) + get_user_env = True + else: + # Assume Env-vars were provided sbatch style like a string. + # Setup all 'SLURM' env vars found first. + for var, val in pyenviron.items(): + if var.startswith("SLURM_"): + slurm_env_array_overwrite(&self.ptr.environment, + var, str(val)) + + # Merge the provided environment variables from the string in. 
+ for idx, item in enumerate(vals.split(",")): + if idx == 0 and item.casefold() == "all": + slurm_env_array_merge(&self.ptr.environment, + slurm.environ) + continue + + if not "=" in item: + continue + + var, val = item.split("=", 1) + slurm_env_array_overwrite(&self.ptr.environment, + var, str(val)) + get_user_env = True + else: + # Here, the user provided an actual dictionary as Input. + # Setup all 'SLURM' env vars first. + for var, val in pyenviron.items(): + if var.startswith("SLURM_"): + slurm_env_array_overwrite(&self.ptr.environment, + var, str(val)) + + # Setup all User selected env vars. + for var, val in vals.items(): + slurm_env_array_overwrite(&self.ptr.environment, + var, str(val)) + + if get_user_env: + slurm_env_array_overwrite(&self.ptr.environment, + "SLURM_GET_USER_ENV", "1") + + # Calculate Environment size + while self.ptr.environment and self.ptr.environment[self.ptr.env_size]: + self.ptr.env_size+=1 + + def _set_distribution(self): + dist=plane = None + + if not self.distribution: + self.ptr.task_dist = slurm.SLURM_DIST_UNKNOWN + return None + + if isinstance(self.distribution, int): + # Assume the user meant to specify the plane size only. 
+ plane = u16(self.distribution) + elif isinstance(self.distribution, str): + # Support sbatch style string input + dist = TaskDistribution.from_str(self.distribution) + plane = dist.plane if isinstance(dist.plane, int) else 0 + + if plane: + self.ptr.plane_size = plane + self.ptr.task_dist = slurm.SLURM_DIST_PLANE + elif dist is not None: + self.ptr.task_dist = dist.as_int() + + def _set_gpu_binding(self): + binding = self.gpu_binding + + if not binding: + if self.ptr.ntasks_per_tres != u16(None): + # Set gpu bind implicit to single:ntasks_per_gpu + binding = f"single:{self.ntasks_per_gpu}" + else: + binding = self.gpu_binding.replace("verbose,", "") \ + .replace("gpu:", "") + if "verbose" in self.gpu_binding: + binding = f"verbose,gpu:{binding}" + + cstr.fmalloc(&self.ptr.tres_bind, binding) + + def _set_min_cpus(self): + if self.min_cpus_per_node: + self.ptr.min_cpus = u16(self.min_cpus_per_node) + elif not self.is_update: + if self.overcommit: + self.ptr.min_cpus = max(self.ptr.min_nodes, 1) + + self.ptr.min_cpus = self.ptr.cpus_per_task * self.ptr.num_tasks + + def _set_switches(self): + kwargs = self.switches + if isinstance(kwargs, dict): + self.ptr.req_switch = u32(kwargs.get("count")) + self.ptr.wait4switch = timestr_to_secs(kwargs.get("max_wait_time")) + elif kwargs is not None: + vals = str(kwargs.split("@")) + if len(vals) > 1: + self.ptr.wait4switch = timestr_to_secs(vals[1]) + self.ptr.req_switch = u32(vals[0]) + + def _set_signal(self): + vals = self.signal + if not vals: + return None + + info = vals + # This supports input like the --signal option from sbatch + if vals and not isinstance(vals, dict): + info = {} + val_list = re.split("[:@]+", str(vals)) + + if len(val_list): + if ":" in str(vals): + flags = val_list.pop(0).casefold() + + if "r" in flags: + info["allow_reservation_overlap"] = True + + if "b" in flags: + info["batch_only"] = True + + if "@" in str(vals): + info["time"] = val_list[1] + + info["signal"] = val_list[0] + + # Parse values 
first to catch bad input + w_signal = u16(signal_to_num(info.get("signal"))) + w_time = u16(info.get("time"), on_noval=60) + batch_only = bool(info.get("batch_only")) + allow_resv_overlap = bool(info.get("allow_reservation_overlap")) + + # Then set it. At this point we can be sure that the input is correct. + self.ptr.warn_signal = w_signal + self.ptr.warn_time = w_time + u16_set_bool_flag(&self.ptr.warn_flags, + batch_only, slurm.KILL_JOB_BATCH) + u16_set_bool_flag(&self.ptr.warn_flags, + allow_resv_overlap, slurm.KILL_JOB_RESV) + + def _set_gres_binding(self): + if not self.gres_binding: + return None + elif self.gres_binding.casefold() == "enforce-binding": + self.ptr.bitflags |= slurm.GRES_ENFORCE_BIND + elif self.gres_binding.casefold() == "disable-binding": + self.ptr.bitflags |= slurm.GRES_DISABLE_BIND diff --git a/pyslurm/core/job/task_dist.pxd b/pyslurm/core/job/task_dist.pxd new file mode 100644 index 00000000..5fe76488 --- /dev/null +++ b/pyslurm/core/job/task_dist.pxd @@ -0,0 +1,41 @@ +######################################################################### +# task_dist.pxd - job task distribution +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm cimport slurm +from pyslurm.core.common.uint cimport u16 +from pyslurm.slurm cimport ( + task_dist_states_t, +) + + +cdef class TaskDistribution: + + cdef public: + str nodes + str sockets + str cores + plane + pack + + cdef task_dist_states_t state diff --git a/pyslurm/core/job/task_dist.pyx b/pyslurm/core/job/task_dist.pyx new file mode 100644 index 00000000..0c46cbc8 --- /dev/null +++ b/pyslurm/core/job/task_dist.pyx @@ -0,0 +1,352 @@ +######################################################################### +# task_dist.pyx - job task distribution +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + + +cdef class TaskDistribution: + + def __init__(self, nodes="block", sockets="cyclic", + cores=None, pack=None, plane_size=None): + self.nodes = nodes + self.sockets = sockets + self.cores = cores if cores else self.sockets + self.pack = pack + self.plane = plane_size + self.state = self._get_task_dist_state() + + def __eq__(self, other): + if not isinstance(other, TaskDistribution): + return NotImplemented + return self.as_int() == other.as_int() + + @staticmethod + def from_int(dist): + cdef TaskDistribution tdist = None + + if int(dist) <= 0 or dist == slurm.SLURM_DIST_UNKNOWN: + return None + + if (dist & slurm.SLURM_DIST_STATE_BASE) != slurm.SLURM_DIST_UNKNOWN: + tdist = _parse_task_dist_from_int(dist) + + dist_flag = dist & slurm.SLURM_DIST_STATE_FLAGS + tdist = _parse_task_dist_flags_from_int(tdist, dist_flag) + + if tdist: + tdist.state = dist + + return tdist + + def _to_str_no_flags(self): + if self.plane: + return "plane" + + dist_str = "" + nodes = self.nodes + if nodes is not None and nodes != "*": + dist_str = f"{nodes}" + else: + dist_str = "block" + + sockets = self.sockets + if sockets is not None and sockets != "*": + dist_str = f"{dist_str}:{sockets}" + else: + dist_str = f"{dist_str}:cyclic" + + cores = self.cores + if cores is not None and cores != "*": + dist_str = f"{dist_str}:{cores}" + else: + dist_str = f"{dist_str}:{sockets}" + + return dist_str + + def to_str(self): + dist_str = self._to_str_no_flags() + + if self.pack is not None: + dist_str = f"{dist_str},{'Pack' if self.pack else 'NoPack'}" + + return dist_str + + def to_dict(self): + return { + "nodes": self.nodes, + "sockets": self.sockets, + "cores": self.cores, + "plane": self.plane, + "pack": self.pack, + } + + def as_int(self): + return self.state + + def _get_task_dist_state(self): + cdef task_dist_states_t dist_state + + dist_str = self._to_str_no_flags() + if dist_str == "plane": + 
return slurm.SLURM_DIST_PLANE + + dist_state = _parse_str_to_task_dist_int(dist_str) + if dist_state == slurm.SLURM_DIST_UNKNOWN: + raise ValueError(f"Invalid distribution specification: {dist_str}") + + # Check for Pack/NoPack + # Don't do anything if it is None + if self.pack: + dist_state = (dist_state | slurm.SLURM_DIST_PACK_NODES) + elif self.pack is not None and not self.pack: + dist_state = (dist_state | slurm.SLURM_DIST_NO_PACK_NODES) + + return dist_state + + @staticmethod + def from_str(dist_str): + cdef TaskDistribution tdist = TaskDistribution.__new__(TaskDistribution) + + # Plane method - return early because nothing else can be + # specified when this is set. + if "plane" in dist_str: + if "plane=" in dist_str: + plane_size = u16(dist_str.split("=", 1)[1]) + return TaskDistribution(plane_size=plane_size) + else: + return TaskDistribution(plane_size=True) + + # [0] = distribution method for nodes:sockets:cores + # [1] = pack/nopack specification (true or false) + dist_items = dist_str.split(",", 1) + + # Parse the different methods + dist_methods = dist_items[0].split(":") + if len(dist_methods) and dist_methods[0] != "*": + tdist.nodes = dist_methods[0] + + if len(dist_methods) > 2 and dist_methods[1] != "*": + tdist.sockets = dist_methods[1] + + if len(dist_methods) >= 3: + if dist_methods[2] == "*": + tdist.cores = tdist.sockets + else: + tdist.cores = dist_methods[2] + + if len(dist_items) > 1: + if dist_items[1].casefold() == "pack": + tdist.pack = True + elif dist_items[1].casefold() == "nopack": + tdist.pack = False + + tdist.state = tdist._get_task_dist_state() + return tdist + + +# https://github.com/SchedMD/slurm/blob/510ba4f17dfa559b579aa054cb8a415dcc224abc/src/common/proc_args.c#L319 +def _parse_task_dist_from_int(dist): + cdef TaskDistribution out = TaskDistribution.__new__(TaskDistribution) + + state = dist & slurm.SLURM_DIST_STATE_BASE + if state == slurm.SLURM_DIST_BLOCK: + out.nodes = "block" + elif state == slurm.SLURM_DIST_CYCLIC: + 
out.nodes = "cyclic" + elif state == slurm.SLURM_DIST_PLANE: + out.plane = state + elif state == slurm.SLURM_DIST_ARBITRARY: + out.nodes = "arbitrary" + elif state == slurm.SLURM_DIST_CYCLIC_CYCLIC: + out.nodes = "cyclic" + out.sockets = "cyclic" + elif state == slurm.SLURM_DIST_CYCLIC_BLOCK: + out.nodes = "cyclic" + out.sockets = "block" + elif state == slurm.SLURM_DIST_CYCLIC_CFULL: + out.nodes = "cyclic" + out.sockets = "fcyclic" + elif state == slurm.SLURM_DIST_BLOCK_CYCLIC: + out.nodes = "block" + out.sockets = "cyclic" + elif state == slurm.SLURM_DIST_BLOCK_BLOCK: + out.nodes = "block" + out.sockets = "block" + elif state == slurm.SLURM_DIST_BLOCK_CFULL: + out.nodes = "block" + out.sockets = "fcyclic" + elif state == slurm.SLURM_DIST_CYCLIC_CYCLIC_CYCLIC: + out.nodes = "cyclic" + out.sockets = "cyclic" + out.cores = "cyclic" + elif state == slurm.SLURM_DIST_CYCLIC_CYCLIC_BLOCK: + out.nodes = "cyclic" + out.sockets = "cyclic" + out.cores = "block" + elif state == slurm.SLURM_DIST_CYCLIC_CYCLIC_CFULL: + out.nodes = "cyclic" + out.sockets = "cyclic" + out.cores = "fcyclic" + elif state == slurm.SLURM_DIST_CYCLIC_BLOCK_CYCLIC: + out.nodes = "cyclic" + out.sockets = "block" + out.cores = "cyclic" + elif state == slurm.SLURM_DIST_CYCLIC_BLOCK_CYCLIC: + out.nodes = "cyclic" + out.sockets = "block" + out.cores = "cyclic" + elif state == slurm.SLURM_DIST_CYCLIC_BLOCK_BLOCK: + out.nodes = "cyclic" + out.sockets = "block" + out.cores = "block" + elif state == slurm.SLURM_DIST_CYCLIC_BLOCK_CFULL: + out.nodes = "cyclic" + out.sockets = "block" + out.cores = "fcyclic" + elif state == slurm.SLURM_DIST_CYCLIC_CFULL_CYCLIC: + out.nodes = "cyclic" + out.sockets = "fcyclic" + out.cores = "cyclic" + elif state == slurm.SLURM_DIST_CYCLIC_CFULL_BLOCK: + out.nodes = "cyclic" + out.sockets = "fcyclic" + out.cores = "block" + elif state == slurm.SLURM_DIST_CYCLIC_CFULL_CFULL: + out.nodes = "cyclic" + out.sockets = "fcyclic" + out.cores = "fcyclic" + elif state == 
slurm.SLURM_DIST_BLOCK_CYCLIC_CYCLIC: + out.nodes = "block" + out.sockets = "cyclic" + out.cores = "cyclic" + elif state == slurm.SLURM_DIST_BLOCK_CYCLIC_BLOCK: + out.nodes = "block" + out.sockets = "cyclic" + out.cores = "block" + elif state == slurm.SLURM_DIST_BLOCK_CYCLIC_CFULL: + out.nodes = "block" + out.sockets = "cyclic" + out.cores = "fcyclic" + elif state == slurm.SLURM_DIST_BLOCK_BLOCK_CYCLIC: + out.nodes = "block" + out.sockets = "block" + out.cores = "cyclic" + elif state == slurm.SLURM_DIST_BLOCK_BLOCK_BLOCK: + out.nodes = "block" + out.sockets = "block" + out.cores = "block" + elif state == slurm.SLURM_DIST_BLOCK_BLOCK_CFULL: + out.nodes = "block" + out.sockets = "block" + out.cores = "fcyclic" + elif state == slurm.SLURM_DIST_BLOCK_CFULL_CYCLIC: + out.nodes = "block" + out.sockets = "fcyclic" + out.cores = "cyclic" + elif state == slurm.SLURM_DIST_BLOCK_CFULL_BLOCK: + out.nodes = "block" + out.sockets = "fcyclic" + out.cores = "block" + elif state == slurm.SLURM_DIST_BLOCK_CFULL_CFULL: + out.nodes = "block" + out.sockets = "fcyclic" + out.cores = "fcyclic" + else: + return None + + return out + + +def _parse_task_dist_flags_from_int(TaskDistribution dst, dist_flag): + if not dist_flag: + return dst + + cdef TaskDistribution _dst = dst + if not _dst: + _dst = TaskDistribution.__new__(TaskDistribution) + + if dist_flag == slurm.SLURM_DIST_PACK_NODES: + _dst.pack = True + elif dist_flag == slurm.SLURM_DIST_NO_PACK_NODES: + _dst.pack = False + + return _dst + + +def _parse_str_to_task_dist_int(dist_str): + # Select the correct distribution method according to dist_str. 
+ if dist_str == "cyclic": + return slurm.SLURM_DIST_CYCLIC + elif dist_str == "block": + return slurm.SLURM_DIST_BLOCK + elif dist_str == "arbitrary" or dist_str == "hostfile": + return slurm.SLURM_DIST_ARBITRARY + elif dist_str == "cyclic:cyclic": + return slurm.SLURM_DIST_CYCLIC_CYCLIC + elif dist_str == "cyclic:block": + return slurm.SLURM_DIST_CYCLIC_BLOCK + elif dist_str == "block:block": + return slurm.SLURM_DIST_BLOCK_BLOCK + elif dist_str == "block:cyclic": + return slurm.SLURM_DIST_BLOCK_CYCLIC + elif dist_str == "block:fcyclic": + return slurm.SLURM_DIST_BLOCK_CFULL + elif dist_str == "cyclic:fcyclic": + return slurm.SLURM_DIST_CYCLIC_CFULL + elif dist_str == "cyclic:cyclic:cyclic": + return slurm.SLURM_DIST_CYCLIC_CYCLIC_CYCLIC + elif dist_str == "cyclic:cyclic:block": + return slurm.SLURM_DIST_CYCLIC_CYCLIC_BLOCK + elif dist_str == "cyclic:cyclic:fcyclic": + return slurm.SLURM_DIST_CYCLIC_CYCLIC_CFULL + elif dist_str == "cyclic:block:cyclic": + return slurm.SLURM_DIST_CYCLIC_BLOCK_CYCLIC + elif dist_str == "cyclic:block:block": + return slurm.SLURM_DIST_CYCLIC_BLOCK_BLOCK + elif dist_str == "cyclic:block:fcyclic": + return slurm.SLURM_DIST_CYCLIC_BLOCK_CFULL + elif dist_str == "cyclic:fcyclic:cyclic": + return slurm.SLURM_DIST_CYCLIC_CFULL_CYCLIC + elif dist_str == "cyclic:fcyclic:block": + return slurm.SLURM_DIST_CYCLIC_CFULL_BLOCK + elif dist_str == "cyclic:fcyclic:fcyclic": + return slurm.SLURM_DIST_CYCLIC_CFULL_CFULL + elif dist_str == "block:cyclic:cyclic": + return slurm.SLURM_DIST_BLOCK_CYCLIC_CYCLIC + elif dist_str == "block:cyclic:block": + return slurm.SLURM_DIST_BLOCK_CYCLIC_BLOCK + elif dist_str == "block:cyclic:fcyclic": + return slurm.SLURM_DIST_BLOCK_CYCLIC_CFULL + elif dist_str == "block:block:cyclic": + return slurm.SLURM_DIST_BLOCK_BLOCK_CYCLIC + elif dist_str == "block:block:block": + return slurm.SLURM_DIST_BLOCK_BLOCK_BLOCK + elif dist_str == "block:block:fcyclic": + return slurm.SLURM_DIST_BLOCK_BLOCK_CFULL + elif dist_str == 
"block:fcyclic:cyclic": + return slurm.SLURM_DIST_BLOCK_CFULL_CYCLIC + elif dist_str == "block:fcyclic:block": + return slurm.SLURM_DIST_BLOCK_CFULL_BLOCK + elif dist_str == "block:fcyclic:fcyclic": + return slurm.SLURM_DIST_BLOCK_CFULL_CFULL + else: + return slurm.SLURM_DIST_UNKNOWN diff --git a/pyslurm/core/job/util.pyx b/pyslurm/core/job/util.pyx new file mode 100644 index 00000000..7b463b2c --- /dev/null +++ b/pyslurm/core/job/util.pyx @@ -0,0 +1,345 @@ +######################################################################### +# util.pyx - utility functions used to parse various job flags +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t +from pyslurm cimport slurm +from pyslurm.core.common.uint import * +from pyslurm.core.common.uint cimport * + +# Note: Maybe consider using libslurmfull again to avoid having to reimplement +# some of these functions and keeping track for changes in new releases. 
+ +def mail_type_list_to_int(mail_types): + """Convert a str or list of mail types to a uint16_t.""" + cdef uint16_t flags = 0 + types = mail_types + + if not types or "None" == types: + return slurm.NO_VAL16 + + if isinstance(types, str): + types = types.split(",") + + for typ in mail_types: + typ = typ.casefold() + + if "array_tasks" == typ: + flags |= slurm.MAIL_ARRAY_TASKS + + elif "begin" == typ: + flags |= slurm.MAIL_JOB_BEGIN + + elif "end" == typ: + flags |= slurm.MAIL_JOB_END + + elif "fail" == typ: + flags |= slurm.MAIL_JOB_FAIL + + # elif "invalid_depend" == typ: + # flags |= slurm.MAIL_INVALID_DEPEND + + elif "requeue" == typ: + flags |= slurm.MAIL_JOB_REQUEUE + + elif "stage_out" == typ: + flags |= slurm.MAIL_JOB_STAGE_OUT + + elif "time_limit" == typ: + flags |= slurm.MAIL_JOB_TIME100 + + elif "time_limit_90" == typ: + flags |= slurm.MAIL_JOB_TIME90 + + elif "time_limit_80" == typ: + flags |= slurm.MAIL_JOB_TIME80 + + elif "time_limit_50" == typ: + flags |= slurm.MAIL_JOB_TIME50 + + elif "all" == typ: + flags |= (slurm.MAIL_JOB_BEGIN + | slurm.MAIL_JOB_END + | slurm.MAIL_JOB_FAIL + | slurm.MAIL_JOB_REQUEUE + | slurm.MAIL_JOB_STAGE_OUT) + else: + raise ValueError("Invalid Mail type: {typ}.") + + return flags + + +def mail_type_int_to_list(uint16_t typ): + """Convert uint16_t to a list of mail types.""" + types = [] + + if typ == 0: + return types + + if typ & slurm.MAIL_ARRAY_TASKS: + types.append("array_tasks") + +# if typ & slurm.MAIL_INVALID_DEPEND: +# types.append("invalid_depend") + + if typ & slurm.MAIL_JOB_BEGIN: + types.append("begin") + + if typ & slurm.MAIL_JOB_END: + types.append("end") + + if typ & slurm.MAIL_JOB_FAIL: + types.append("fail") + + if typ & slurm.MAIL_JOB_REQUEUE: + types.append("requeue") + + if typ & slurm.MAIL_JOB_STAGE_OUT: + types.append("stage_out") + + if typ & slurm.MAIL_JOB_TIME50: + types.append("time_limit_50") + + if typ & slurm.MAIL_JOB_TIME80: + types.append("time_limit_80") + + if typ & slurm.MAIL_JOB_TIME90: + 
types.append("time_limit_90") + + if typ & slurm.MAIL_JOB_TIME100: + types.append("time_limit_100") + + return types + + +def acctg_profile_list_to_int(acctg_profiles): + """Convert a str or list of accounting gather profiles to uin32_t.""" + cdef uint32_t profile = 0 + profiles = acctg_profiles + + if not acctg_profiles: + return slurm.NO_VAL + + if "none" in acctg_profiles: + return slurm.ACCT_GATHER_PROFILE_NONE + elif "all" in acctg_profiles: + return slurm.ACCT_GATHER_PROFILE_ALL + + if "energy" in acctg_profiles: + profile |= slurm.ACCT_GATHER_PROFILE_ENERGY + + if "task" in acctg_profiles: + profile |= slurm.ACCT_GATHER_PROFILE_TASK + + if "lustre" in acctg_profiles: + profile |= slurm.ACCT_GATHER_PROFILE_LUSTRE + + if "network" in acctg_profiles: + profile |= slurm.ACCT_GATHER_PROFILE_NETWORK + + return profile + + +def acctg_profile_int_to_list(flags): + """Convert uin32_t accounting gather profiles to a list of strings.""" + profiles = [] + + if flags == 0 or flags == slurm.NO_VAL: + return [] + + if flags == slurm.ACCT_GATHER_PROFILE_ALL: + return ["all"] + elif flags == slurm.ACCT_GATHER_PROFILE_NONE: + return [] + + if flags & slurm.ACCT_GATHER_PROFILE_ENERGY: + profiles.append("energy") + + if flags & slurm.ACCT_GATHER_PROFILE_TASK: + profiles.append("task") + + if flags & slurm.ACCT_GATHER_PROFILE_LUSTRE: + profiles.append("lustre") + + if flags & slurm.ACCT_GATHER_PROFILE_NETWORK: + profiles.append("network") + + return profiles + + +def power_type_list_to_int(power_types): + """Convert a str or list of str with power types to uint8_t.""" + cdef uint8_t flags = 0 + + if not power_types: + return slurm.NO_VAL8 + + if "level" in power_types: + flags |= slurm.SLURM_POWER_FLAGS_LEVEL + + +def power_type_int_to_list(flags): + """Convert uint8_t power type flags to a list of strings.""" + types = [] + + if flags & slurm.SLURM_POWER_FLAGS_LEVEL: + types.append("level") + + return types + + +def shared_type_str_to_int(typ): + """Convert a job-sharing type 
str to its numerical representation.""" + if not typ: + return slurm.NO_VAL16 + + typ = typ.casefold() + if typ == "oversubscribe" or typ == "yes": + return slurm.JOB_SHARED_OK + elif typ == "user": + return slurm.JOB_SHARED_USER + elif typ == "mcs": + return slurm.JOB_SHARED_MCS + elif typ == "no" or typ == "exclusive": + return slurm.JOB_SHARED_NONE + else: + raise ValueError(f"Invalid resource_sharing type: {typ}.") + + +def cpu_gov_str_to_int(gov): + """Convert a cpu governor str to is numerical representation.""" + if not gov: + return u32(None) + + gov = gov.casefold() + rc = 0 + + if gov == "conservative": + rc = slurm.CPU_FREQ_CONSERVATIVE + elif gov == "ondemand": + rc = slurm.CPU_FREQ_ONDEMAND + elif gov == "performance": + rc = slurm.CPU_FREQ_PERFORMANCE + elif gov == "powersave": + rc = slurm.CPU_FREQ_POWERSAVE + elif gov == "userspace": + rc = slurm.CPU_FREQ_USERSPACE + elif gov == "schedutil": + rc = slurm.CPU_FREQ_SCHEDUTIL + else: + raise ValueError("Invalid cpu gov type: {}".format(gov)) + + return rc | slurm.CPU_FREQ_RANGE_FLAG + + +def cpu_freq_str_to_int(freq): + """Convert a cpu-frequency str to its numerical representation.""" + if not freq: + return u32(None) + + if isinstance(freq, str) and not freq.isdigit(): + freq = freq.casefold() + + if freq == "low": + return slurm.CPU_FREQ_LOW + elif freq == "highm1": + return slurm.CPU_FREQ_HIGHM1 + elif freq == "high": + return slurm.CPU_FREQ_HIGH + elif freq == "medium": + return slurm.CPU_FREQ_MEDIUM + else: + fr = u32(int(freq)) + if fr != slurm.NO_VAL: + return fr + + raise ValueError(f"Invalid cpu freq value: {freq}.") + + +def cpu_freq_int_to_str(freq): + """Convert a numerical cpufreq value to its string representation.""" + if freq == slurm.CPU_FREQ_LOW: + return "Low" + elif freq == slurm.CPU_FREQ_MEDIUM: + return "Medium" + elif freq == slurm.CPU_FREQ_HIGHM1: + return "Highm1" + elif freq == slurm.CPU_FREQ_HIGH: + return "High" + elif freq == slurm.CPU_FREQ_CONSERVATIVE: + return 
"Conservative" + elif freq == slurm.CPU_FREQ_PERFORMANCE: + return "Performance" + elif freq == slurm.CPU_FREQ_POWERSAVE: + return "PowerSave" + elif freq == slurm.CPU_FREQ_USERSPACE: + return "UserSpace" + elif freq == slurm.CPU_FREQ_ONDEMAND: + return "OnDemand" + elif freq == slurm.CPU_FREQ_SCHEDUTIL: + return "SchedUtil" + elif freq & slurm.CPU_FREQ_RANGE_FLAG: + return None + elif freq == slurm.NO_VAL or freq == 0: + return None + else: + # This is in kHz + return freq + + +def dependency_str_to_dict(dep): + if not dep: + return None + + out = { + "after": [], + "afterany": [], + "afterburstbuffer": [], + "aftercorr": [], + "afternotok": [], + "afterok": [], + "singleton": False, + "satisfy": "all", + } + + delim = "," + if "?" in dep: + delim = "?" + out["satisfy"] = "any" + + for item in dep.split(delim): + if item == "singleton": + out["singleton"] = True + + dep_and_job = item.split(":", 1) + if len(dep_and_job) != 2: + continue + + dep_name, jobs = dep_and_job[0], dep_and_job[1].split(":") + if dep_name not in out: + continue + + for job in jobs: + out[dep_name].append(int(job) if job.isdigit() else job) + + return out diff --git a/pyslurm/core/node.pxd b/pyslurm/core/node.pxd new file mode 100644 index 00000000..3f39ece7 --- /dev/null +++ b/pyslurm/core/node.pxd @@ -0,0 +1,222 @@ +######################################################################### +# node.pxd - interface to work with nodes in slurm +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+ +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from libc.string cimport memcpy, memset +from pyslurm cimport slurm +from pyslurm.slurm cimport ( + node_info_t, + node_info_msg_t, + update_node_msg_t, + partition_info_msg_t, + slurm_load_node, + slurm_load_node_single, + slurm_update_node, + slurm_delete_node, + slurm_create_node, + slurm_load_partitions, + slurm_free_update_node_msg, + slurm_init_update_node_msg, + slurm_populate_node_partitions, + slurm_free_node_info_msg, + slurm_free_node_info_members, + slurm_free_update_node_msg, + slurm_free_partition_info_msg, + slurm_get_select_nodeinfo, + slurm_sprint_cpu_bind_type, + slurm_node_state_string_complete, + slurm_node_state_string, + cpu_bind_type_t, +) + + +cdef class Nodes(dict): + """A collection of Node objects. + + Args: + nodes (Union[list, dict, str], optional): + Nodes to initialize this collection with. + + Attributes: + free_memory (int): + Amount of free memory in this node collection. (in Mebibytes) + real_memory (int): + Amount of real memory in this node collection. (in Mebibytes) + allocated_memory (int): + Amount of alloc Memory in this node collection. (in Mebibytes) + total_cpus (int): + Total amount of CPUs in this node collection. + idle_cpus (int): + Total amount of idle CPUs in this node collection. + allocated_cpus (int): + Total amount of allocated CPUs in this node collection. + effective_cpus (int): + Total amount of effective CPUs in this node collection. 
+ current_watts (int): + Total amount of Watts consumed in this node collection. + avg_watts (int): + Amount of average watts consumed in this node collection. + + Raises: + MemoryError: If malloc fails to allocate memory. + """ + cdef: + node_info_msg_t *info + partition_info_msg_t *part_info + node_info_t tmp_info + + +cdef class Node: + """A Slurm node. + + Args: + name (str): + Name of a node + **kwargs: + Any writable property. Writable attributes include: + * name + * configured_gres + * address + * hostname + * extra + * comment + * weight + * available_features + * active_features + * cpu_binding + * state + + Attributes: + name (str): + Name of the node. + architecture (str): + Architecture of the node (e.g. x86_64) + configured_gres (dict): + Generic Resources this Node is configured with. + owner (str): + User that owns the Node. + address (str): + Address of the node. + hostname (str): + Hostname of the node. + extra (str): + Arbitrary string attached to the Node. + reason (str): + Reason why this node is in its current state. + reason_user (str): + Name of the User who set the reason. + comment (str): + Arbitrary node comment. + bcast_address (str): + Address of the node for sbcast. + slurm_version (str): + Version of slurm this node is running on. + operating_system (str): + Name of the operating system installed. + allocated_gres (dict): + Generic Resources currently in use on the node. + mcs_label (str): + MCS label for the node. + allocated_memory (int): + Memory in Mebibytes allocated on the node. + real_memory (int): + Real Memory in Mebibytes configured for this node. + free_memory (int): + Free Memory in Mebibytes on the node. + memory_reserved_for_system (int): + Raw Memory in Mebibytes reserved for the System not usable by + Jobs. + temporary_disk_space_per_node (int): + Amount of temporary disk space this node has, in Mebibytes. + weight (int): + Weight of the node in scheduling. 
+ effective_cpus (int): + Number of effective CPUs the node has. + total_cpus (int): + Total amount of CPUs the node has. + sockets (int): + Number of sockets the node has. + cores_reserved_for_system (int): + Number of cores reserved for the System not usable by Jobs. + boards (int): + Number of boards the node has. + cores_per_socket (int): + Number of cores per socket configured for the node. + threads_per_core (int): + Number of threads per core configured for the node. + available_features (list): + List of features available on the node. + active_features (list): + List of features on the node. + partitions (list): + List of partitions this Node is part of. + boot_time (int): + Time the node has booted, as unix timestamp. + slurmd_start_time (int): + Time the slurmd has started on the Node, as unix timestamp. + last_busy_time (int): + Time this node was last busy, as unix timestamp. + reason_time (int): + Time the reason was set for the node, as unix timestamp. + allocated_cpus (int): + Number of allocated CPUs on the node. + idle_cpus (int): + Number of idle CPUs. + cpu_binding (str): + Default CPU-Binding on the node. + cap_watts (int): + Node cap watts. + current_watts (int): + Current amount of watts consumed on the node. + avg_watts (int): + Average amount of watts consumed on the node. + external_sensors (dict): + External Sensor info for the Node. + The dict returned contains the following information: + * joules_total (int) + * current_watts (int) + * temperature (int) + state (str): + State the node is currently in. + next_state (str): + Next state the node will be in. + cpu_load (float): + CPU Load on the Node. + slurmd_port (int): + Port the slurmd is listening on the node. + + Raises: + MemoryError: If malloc fails to allocate memory. 
+ """ + cdef: + node_info_t *info + update_node_msg_t *umsg + dict passwd + dict groups + + @staticmethod + cdef _swap_data(Node dst, Node src) + + @staticmethod + cdef Node from_ptr(node_info_t *in_ptr) + diff --git a/pyslurm/core/node.pyx b/pyslurm/core/node.pyx new file mode 100644 index 00000000..17429ce1 --- /dev/null +++ b/pyslurm/core/node.pyx @@ -0,0 +1,719 @@ +######################################################################### +# node.pyx - interface to work with nodes in slurm +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm.slurm cimport xfree, try_xmalloc +from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t +from pyslurm.core.common cimport cstr +from pyslurm.core.common import cstr +from pyslurm.core.common cimport ctime +from pyslurm.core.common import ctime +from pyslurm.core.common.ctime cimport time_t +from pyslurm.core.common.uint cimport * +from pyslurm.core.common.uint import * +from pyslurm.core.error import RPCError, verify_rpc +from pyslurm.core.common.ctime import timestamp_to_date, _raw_time +from pyslurm.core.common import ( + uid_to_name, + gid_to_name, + humanize, + _getgrall_to_dict, + _getpwall_to_dict, + cpubind_to_num, + instance_to_dict, + _sum_prop, + nodelist_from_range_str, +) + + +cdef class Nodes(dict): + + def __dealloc__(self): + slurm_free_node_info_msg(self.info) + slurm_free_partition_info_msg(self.part_info) + + def __cinit__(self): + self.info = NULL + self.part_info = NULL + + def __init__(self, nodes=None): + if isinstance(nodes, dict): + self.update(nodes) + elif isinstance(nodes, str): + nodelist = nodelist_from_range_str(nodes) + self.update({node: Node(node) for node in nodelist}) + elif nodes is not None: + for node in nodes: + if isinstance(node, str): + self[node] = Node(node) + else: + self[node.name] = node + + @staticmethod + def load(preload_passwd_info=False): + """Load all nodes in the system. + + Args: + preload_passwd_info (bool): + Decides whether to query passwd and groups information from + the system. + Could potentially speed up access to attributes of the Node + where a UID/GID is translated to a name. + If True, the information will fetched and stored in each of + the Node instances. The default is False. + + Returns: + (Nodes): Collection of node objects. + + Raises: + RPCError: When getting all the Nodes from the slurmctld failed. + MemoryError: If malloc fails to allocate memory. 
+ """ + cdef: + dict passwd = {} + dict groups = {} + Nodes nodes = Nodes.__new__(Nodes) + int flags = slurm.SHOW_ALL + Node node + + verify_rpc(slurm_load_node(0, &nodes.info, flags)) + verify_rpc(slurm_load_partitions(0, &nodes.part_info, flags)) + slurm_populate_node_partitions(nodes.info, nodes.part_info) + + # If requested, preload the passwd and groups database to potentially + # speedup lookups for an attribute in a node, e.g "owner". + if preload_passwd_info: + passwd = _getpwall_to_dict() + groups = _getgrall_to_dict() + + # zero-out a dummy node_info_t + memset(&nodes.tmp_info, 0, sizeof(node_info_t)) + + # Put each node pointer into its own "Node" instance. + for cnt in range(nodes.info.record_count): + node = Node.from_ptr(&nodes.info.node_array[cnt]) + + # Prevent double free if xmalloc fails mid-loop and a MemoryError + # is raised by replacing it with a zeroed-out node_info_t. + nodes.info.node_array[cnt] = nodes.tmp_info + + if preload_passwd_info: + node.passwd = passwd + node.groups = groups + + nodes[node.name] = node + + # At this point we memcpy'd all the memory for the Nodes. Setting this + # to 0 will prevent the slurm node free function to deallocate the + # memory for the individual nodes. This should be fine, because they + # are free'd automatically in __dealloc__ since the lifetime of each + # node-pointer is tied to the lifetime of its corresponding "Node" + # instance. + nodes.info.record_count = 0 + + return nodes + + def reload(self): + """Reload the information for nodes in a collection. + + Note: + Only information for nodes which are already in the collection at + the time of calling this method will be reloaded. + + Raises: + RPCError: When getting the Nodes from the slurmctld failed. + """ + cdef Nodes reloaded_nodes + our_nodes = list(self.keys()) + + if not our_nodes: + return None + + reloaded_nodes = Nodes.load() + for node in list(self.keys()): + if node in reloaded_nodes: + # Put the new data in. 
+ self[node] = reloaded_nodes[node] + + return self + + def as_list(self): + """Format the information as list of Node objects. + + Returns: + (list): List of Node objects + """ + return list(self.values()) + + @property + def free_memory(self): + return _sum_prop(self, Node.free_memory) + + @property + def real_memory(self): + return _sum_prop(self, Node.real_memory) + + @property + def allocated_memory(self): + return _sum_prop(self, Node.allocated_memory) + + @property + def total_cpus(self): + return _sum_prop(self, Node.total_cpus) + + @property + def idle_cpus(self): + return _sum_prop(self, Node.idle_cpus) + + @property + def allocated_cpus(self): + return _sum_prop(self, Node.allocated_cpus) + + @property + def effective_cpus(self): + return _sum_prop(self, Node.effective_cpus) + + @property + def current_watts(self): + return _sum_prop(self, Node.current_watts) + + @property + def avg_watts(self): + return _sum_prop(self, Node.avg_watts) + + +cdef class Node: + + def __cinit__(self): + self.info = NULL + self.umsg = NULL + + def __init__(self, name=None, **kwargs): + self._alloc_impl() + self.name = name + for k, v in kwargs.items(): + setattr(self, k, v) + + def _alloc_impl(self): + self._alloc_info() + self._alloc_umsg() + + def _alloc_info(self): + if not self.info: + self.info = try_xmalloc(sizeof(node_info_t)) + if not self.info: + raise MemoryError("xmalloc failed for node_info_t") + + def _alloc_umsg(self): + if not self.umsg: + self.umsg = try_xmalloc(sizeof(update_node_msg_t)) + if not self.umsg: + raise MemoryError("xmalloc failed for update_node_msg_t") + slurm_init_update_node_msg(self.umsg) + + def _dealloc_impl(self): + slurm_free_update_node_msg(self.umsg) + self.umsg = NULL + slurm_free_node_info_members(self.info) + xfree(self.info) + + def __dealloc__(self): + self._dealloc_impl() + + def __setattr__(self, name, val): + # When a user wants to set attributes on a Node instance that was + # created by calling Nodes(), the "umsg" pointer is 
not yet allocated. + # We only allocate memory for it by the time the user actually wants + # to modify something. + self._alloc_umsg() + # Call descriptors __set__ directly + Node.__dict__[name].__set__(self, val) + + def __eq__(self, other): + return isinstance(other, Node) and self.name == other.name + + @staticmethod + cdef Node from_ptr(node_info_t *in_ptr): + cdef Node wrap = Node.__new__(Node) + wrap._alloc_info() + wrap.passwd = {} + wrap.groups = {} + memcpy(wrap.info, in_ptr, sizeof(node_info_t)) + return wrap + + cdef _swap_data(Node dst, Node src): + cdef node_info_t *tmp = NULL + if dst.info and src.info: + tmp = dst.info + dst.info = src.info + src.info = tmp + + @staticmethod + def load(name): + """Load information for a specific node. + + Implements the slurm_load_node_single RPC. + + Returns: + (pyslurm.Node): Returns a new Node instance. + + Raises: + RPCError: If requesting the Node information from the slurmctld + was not successful. + MemoryError: If malloc failed to allocate memory. + + Examples: + >>> import pyslurm + >>> node = pyslurm.Node.load("localhost") + """ + cdef: + node_info_msg_t *node_info = NULL + partition_info_msg_t *part_info = NULL + Node wrap = Node.__new__(Node) + + try: + verify_rpc(slurm_load_node_single(&node_info, + name, slurm.SHOW_ALL)) + verify_rpc(slurm_load_partitions(0, &part_info, slurm.SHOW_ALL)) + slurm_populate_node_partitions(node_info, part_info) + + if node_info and node_info.record_count: + # Copy info + wrap._alloc_impl() + memcpy(wrap.info, &node_info.node_array[0], sizeof(node_info_t)) + node_info.record_count = 0 + else: + raise RPCError(msg=f"Node '{name}' does not exist") + except Exception as e: + raise e + finally: + slurm_free_node_info_msg(node_info) + slurm_free_partition_info_msg(part_info) + + return wrap + + def create(self, state="future"): + """Create a node. + + Implements the slurm_create_node RPC. + + Args: + future (str, optional): + An optional state the created Node should have. 
Allowed values + are "future" and "cloud". "future" is the default. + + Returns: + (Node): This function returns the current Node-instance object + itself. + + Raises: + RPCError: If creating the Node was not successful. + MemoryError: If malloc failed to allocate memory. + + Examples: + >>> from pyslurm import Node + >>> node = Node("testnode").create() + """ + if not self.name: + raise ValueError("You need to set a node name first.") + + self._alloc_umsg() + cstr.fmalloc(&self.umsg.extra, + f"NodeName={self.name} State={state}") + verify_rpc(slurm_create_node(self.umsg)) + + return self + + def modify(self, node=None, **kwargs): + """Modify a node. + + Implements the slurm_update_node RPC. + + Args: + node (pyslurm.Node): + Another Node object which contains all the changes that + should be applied to this instance. + **kwargs: + You can also specify all the changes as keyword arguments. + Allowed values are only attributes which can actually be set + on a Node instance. If a node is explicitly specified as + parameter, all **kwargs will be ignored. + + Raises: + RPCError: When updating the Node was not successful. + + Examples: + >>> from pyslurm import Node + >>> + >>> # Setting a new weight for the Node + >>> changes = Node(weight=100) + >>> Node("localhost").modify(changes) + >>> + >>> # Or by specifying the changes directly to the modify function + >>> Node("localhost").modify(weight=100) + """ + cdef Node n = self + + # Allow the user to both specify changes via a Node instance or + # **kwargs. + if node and isinstance(node, Node): + n = node + elif kwargs: + n = Node(**kwargs) + + n._alloc_umsg() + cstr.fmalloc(&n.umsg.node_names, self.name) + verify_rpc(slurm_update_node(n.umsg)) + + def delete(self): + """Delete a node. + + Implements the slurm_delete_node RPC. + + Raises: + RPCError: If deleting the Node was not successful. + MemoryError: If malloc failed to allocate memory. 
+ + Examples: + >>> from pyslurm import Node + >>> Node("localhost").delete() + """ + self._alloc_umsg() + verify_rpc(slurm_delete_node(self.umsg)) + + def as_dict(self): + """Node information formatted as a dictionary. + + Returns: + (dict): Node information as dict + """ + return instance_to_dict(self) + + @property + def name(self): + return cstr.to_unicode(self.info.name) + + @name.setter + def name(self, val): + cstr.fmalloc2(&self.info.name, &self.umsg.node_names, val) + + @property + def architecture(self): + return cstr.to_unicode(self.info.arch) + + @property + def configured_gres(self): + return cstr.to_gres_dict(self.info.gres) + + @configured_gres.setter + def configured_gres(self, val): + cstr.fmalloc2(&self.info.gres, &self.umsg.gres, + cstr.from_gres_dict(val)) + + @property + def owner(self): + return uid_to_name(self.info.owner, lookup=self.passwd) + + @property + def address(self): + return cstr.to_unicode(self.info.node_addr) + + @address.setter + def address(self, val): + cstr.fmalloc2(&self.info.node_addr, &self.umsg.node_addr, val) + + @property + def hostname(self): + return cstr.to_unicode(self.info.node_hostname) + + @hostname.setter + def hostname(self, val): + cstr.fmalloc2(&self.info.node_hostname, &self.umsg.node_hostname, val) + + @property + def extra(self): + return cstr.to_unicode(self.info.extra) + + @extra.setter + def extra(self, val): + cstr.fmalloc2(&self.info.extra, &self.umsg.extra, val) + + @property + def reason(self): + return cstr.to_unicode(self.info.reason) + + @property + def reason_user(self): + return uid_to_name(self.info.reason_uid, lookup=self.passwd) + + @property + def comment(self): + return cstr.to_unicode(self.info.comment) + + @comment.setter + def comment(self, val): + cstr.fmalloc2(&self.info.comment, &self.umsg.comment, val) + + @property + def bcast_address(self): + return cstr.to_unicode(self.info.bcast_address) + + @property + def slurm_version(self): + return cstr.to_unicode(self.info.version) + + 
@property + def operating_system(self): + return cstr.to_unicode(self.info.os) + + @property + def allocated_gres(self): + return cstr.to_gres_dict(self.info.gres_used) + + @property + def mcs_label(self): + return cstr.to_unicode(self.info.mcs_label) + + @property + def allocated_memory(self): + cdef uint64_t alloc_memory = 0 + if self.info.select_nodeinfo: + slurm_get_select_nodeinfo( + self.info.select_nodeinfo, + slurm.SELECT_NODEDATA_MEM_ALLOC, + slurm.NODE_STATE_ALLOCATED, + &alloc_memory) + return alloc_memory + + @property + def real_memory(self): + return u64_parse(self.info.real_memory) + + @property + def free_memory(self): + return u64_parse(self.info.free_mem) + + @property + def memory_reserved_for_system(self): + return u64_parse(self.info.mem_spec_limit) + + @property + def temporary_disk_space(self): + return u32_parse(self.info.tmp_disk) + + @property + def weight(self): + return u32_parse(self.info.weight) + + @weight.setter + def weight(self, val): + self.info.weight=self.umsg.weight = u32(val) + + @property + def effective_cpus(self): + return u16_parse(self.info.cpus_efctv) + + @property + def total_cpus(self): + return u16_parse(self.info.cpus, on_noval=0) + + @property + def sockets(self): + return u16_parse(self.info.sockets, on_noval=0) + + @property + def cores_reserved_for_system(self): + return u16_parse(self.info.core_spec_cnt) + + @property + def boards(self): + return u16_parse(self.info.boards) + + @property + def cores_per_socket(self): + return u16_parse(self.info.cores) + + @property + def threads_per_core(self): + return u16_parse(self.info.threads) + + @property + def available_features(self): + return cstr.to_list(self.info.features) + + @available_features.setter + def available_features(self, val): + cstr.from_list2(&self.info.features, &self.umsg.features, val) + + @property + def active_features(self): + return cstr.to_list(self.info.features_act) + + @active_features.setter + def active_features(self, val): + 
cstr.from_list2(&self.info.features_act, &self.umsg.features_act, val) + + @property + def partitions(self): + return cstr.to_list(self.info.partitions) + + @property + def boot_time(self): + return _raw_time(self.info.boot_time) + + @property + def slurmd_start_time(self): + return _raw_time(self.info.slurmd_start_time) + + @property + def last_busy_time(self): + return _raw_time(self.info.last_busy) + + @property + def reason_time(self): + return _raw_time(self.info.reason_time) + +# @property +# def tres_configured(self): +# """dict: TRES that are configured on the node.""" +# return cstr.to_dict(self.info.tres_fmt_str) + +# @property +# def tres_alloc(self): +# cdef char *alloc_tres = NULL +# if self.info.select_nodeinfo: +# slurm_get_select_nodeinfo( +# self.info.select_nodeinfo, +# slurm.SELECT_NODEDATA_TRES_ALLOC_FMT_STR, +# slurm.NODE_STATE_ALLOCATED, +# &alloc_tres +# ) +# return cstr.to_gres_dict(alloc_tres) + + @property + def allocated_cpus(self): + cdef uint16_t alloc_cpus = 0 + if self.info.select_nodeinfo: + slurm_get_select_nodeinfo( + self.info.select_nodeinfo, + slurm.SELECT_NODEDATA_SUBCNT, + slurm.NODE_STATE_ALLOCATED, + &alloc_cpus + ) + return alloc_cpus + + @property + def idle_cpus(self): + efctv = self.effective_cpus + if not efctv: + return None + + return efctv - self.allocated_cpus + + @property + def cpu_binding(self): + cdef char cpu_bind[128] + slurm_sprint_cpu_bind_type(cpu_bind, + self.info.cpu_bind) + if cpu_bind == "(null type)": + return None + + return cstr.to_unicode(cpu_bind) + + @cpu_binding.setter + def cpu_binding(self, val): + self.info.cpu_bind=self.umsg.cpu_bind = cpubind_to_num(val) + + @property + def cap_watts(self): + if not self.info.power: + return 0 + return u32_parse(self.info.power.cap_watts, on_noval=0) + + @property + def current_watts(self): + if not self.info.energy: + return 0 + return u32_parse(self.info.energy.current_watts, on_noval=0) + + @property + def avg_watts(self): + if not self.info.energy: + 
return 0 + return u32_parse(self.info.energy.ave_watts, on_noval=0) + + @property + def external_sensors(self): + if not self.info.ext_sensors: + return {} + + return { + "joules_total": u64_parse(self.info.ext_sensors.consumed_energy), + "current_watts": u32_parse(self.info.ext_sensors.current_watts), + "temperature": u32_parse(self.info.ext_sensors.temperature) + } + + @property + def state(self): + cdef char* state = slurm_node_state_string_complete( + self.info.node_state) + state_str = cstr.to_unicode(state) + xfree(state) + return state_str + + @property + def next_state(self): + if ((self.info.next_state != slurm.NO_VAL) + and (self.info.node_state & slurm.NODE_STATE_REBOOT_REQUESTED + or self.info.node_state & slurm.NODE_STATE_REBOOT_ISSUED)): + return cstr.to_unicode( + slurm_node_state_string(self.info.next_state)) + else: + return None + + @state.setter + def state(self, val): + self.umsg.node_state=self.info.node_state = _node_state_from_str(val) + + @property + def cpu_load(self): + load = u32_parse(self.info.cpu_load) + return load / 100.0 if load is not None else 0.0 + + @property + def slurmd_port(self): + return u16_parse(self.info.port) + + +def _node_state_from_str(state, err_on_invalid=True): + if not state: + return slurm.NO_VAL + + for i in range(slurm.NODE_STATE_END): + if state == slurm_node_state_string(i): + return i + + if err_on_invalid: + raise ValueError(f"Invalid Node state: {state}") + else: + return slurm.NO_VAL diff --git a/pyslurm/core/slurmctld.pxd b/pyslurm/core/slurmctld.pxd new file mode 100644 index 00000000..f65655c8 --- /dev/null +++ b/pyslurm/core/slurmctld.pxd @@ -0,0 +1,38 @@ +######################################################################### +# slurmctld.pxd - pyslurm slurmctld api +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the 
terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm cimport slurm +from pyslurm.slurm cimport ( + slurm_conf_t, + slurm_load_ctl_conf, + slurm_free_ctl_conf, + try_xmalloc, +) +from pyslurm.core.common cimport cstr +from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t, int64_t +from pyslurm.core.common.uint cimport * + + +cdef class Config: + cdef slurm_conf_t *ptr diff --git a/pyslurm/core/slurmctld.pyx b/pyslurm/core/slurmctld.pyx new file mode 100644 index 00000000..2b5367c5 --- /dev/null +++ b/pyslurm/core/slurmctld.pyx @@ -0,0 +1,48 @@ +######################################################################### +# slurmctld.pyx - pyslurm slurmctld api +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm.core.error import verify_rpc, RPCError + + +cdef class Config: + + def __cinit__(self): + self.ptr = NULL + + def __init__(self, job_id): + raise RuntimeError("Cannot instantiate class directly") + + def __dealloc__(self): + slurm_free_ctl_conf(self.ptr) + self.ptr = NULL + + @staticmethod + def load(): + cdef Config conf = Config.__new__(Config) + verify_rpc(slurm_load_ctl_conf(0, &conf.ptr)) + return conf + + @property + def cluster(self): + return cstr.to_unicode(self.ptr.cluster_name) diff --git a/pyslurm/pyslurm.pyx b/pyslurm/pyslurm.pyx index adbed03e..89b226a2 100644 --- a/pyslurm/pyslurm.pyx +++ b/pyslurm/pyslurm.pyx @@ -373,26 +373,6 @@ def slurm_load_slurmd_status(): return Status -def slurm_init(conf_file=None): - """Initialize the Slurm API internal structures. - - This function MUST be called before any internal API calls to ensure - Slurm's internal configuration structures have been populated. - - Args: - conf_file (str, optional): Absolute path to the configuration file. If - None (default value), libslurm automatically locates its own - configuration. 
- """ - if conf_file: - slurm.slurm_init(conf_file.encode('UTF-8')) - else: - slurm.slurm_init(NULL) - -def slurm_fini(): - """Cleanup Slurm internal configuration structures.""" - slurm.slurm_fini() - # # Slurm Config Class # @@ -6758,6 +6738,3 @@ cdef class licenses: else: apiError = slurm.slurm_get_errno() raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(apiError), ''), apiError) - -# Automatically load Slurm configuration data structure at pyslurm module load -slurm_init() diff --git a/pyslurm/slurm/SLURM_DISCLAIMER b/pyslurm/slurm/SLURM_DISCLAIMER new file mode 100644 index 00000000..5fb615d5 --- /dev/null +++ b/pyslurm/slurm/SLURM_DISCLAIMER @@ -0,0 +1,159 @@ +Slurm was produced at Lawrence Livermore National Laboratory in collaboration +with various organizations. + +Copyright (C) 2012-2013 Los Alamos National Security, LLC. +Copyright (C) 2011 Trinity Centre for High Performance Computing +Copyright (C) 2010-2015 SchedMD LLC +Copyright (C) 2009-2013 CEA/DAM/DIF +Copyright (C) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS) +Copyright (C) 2008-2011 Lawrence Livermore National Security +Copyright (C) 2008 Vijay Ramasubramanian +Copyright (C) 2007-2008 Red Hat, Inc. +Copyright (C) 2007-2013 National University of Defense Technology, China +Copyright (C) 2007-2015 Bull +Copyright (C) 2005-2008 Hewlett-Packard Development Company, L.P. +Copyright (C) 2004-2009, Marcus Holland-Moritz +Copyright (C) 2002-2007 The Regents of the University of California +Copyright (C) 2002-2003 Linux NetworX +Copyright (C) 2002 University of Chicago +Copyright (C) 2001, Paul Marquess +Copyright (C) 2000 Markus Friedl +Copyright (C) 1999, Kenneth Albanowski +Copyright (C) 1998 Todd C. Miller +Copyright (C) 1996-2003 Maximum Entropy Data Consultants Ltd, +Copyright (C) 1995 Tatu Ylonen , Espoo, Finland +Copyright (C) 1989-1994, 1996-1999, 2001 Free Software Foundation, Inc. 
+Many other organizations contributed code and/or documentation without +including a copyright notice. + +Written by: +Amjad Majid Ali (Colorado State University) +Par Andersson (National Supercomputer Centre, Sweden) +Don Albert (Bull) +Ernest Artiaga (Barcelona Supercomputer Center, Spain) +Danny Auble (LLNL, SchedMD LLC) +Susanne Balle (HP) +Anton Blanchard (Samba) +Janne Blomqvist (Aalto University, Finland) +David Bremer (LLNL) +Jon Bringhurst (LANL) +Bill Brophy (Bull) +Hongjia Cao (National University of Defense Techonogy, China) +Daniel Christians (HP) +Gilles Civario (Bull) +Chuck Clouston (Bull) +Joseph Donaghy (LLNL) +Chris Dunlap (LLNL) +Joey Ekstrom (LLNL/Bringham Young University) +Josh England (TGS Management Corporation) +Kent Engstrom (National Supercomputer Centre, Sweden) +Jim Garlick (LLNL) +Didier Gazen (Laboratoire d'Aerologie, France) +Raphael Geissert (Debian) +Yiannis Georgiou (Bull) +Andriy Grytsenko (Massive Solutions Limited, Ukraine) +Mark Grondona (LLNL) +Takao Hatazaki (HP, Japan) +Matthieu Hautreux (CEA, France) +Chris Holmes (HP) +David Hoppner +Nathan Huff (North Dakota State University) +David Jackson (Adaptive Computing) +Morris Jette (LLNL, SchedMD LLC) +Klaus Joas (University Karlsruhe, Germany) +Greg Johnson (LANL) +Jason King (LLNL) +Aaron Knister (Environmental Protection Agency) +Nancy Kritkausky (Bull) +Roman Kurakin (Institute of Natural Science and Ecology, Russia) +Eric Lin (Bull) +Don Lipari (LLNL) +Puenlap Lee (Bull) +Dennis Leepow +Bernard Li (Genome Sciences Centre, Canada) +Donald Lipari (LLNL) +Steven McDougall (SiCortex) +Donna Mecozzi (LLNL) +Bjorn-Helge Mevik (University of Oslo, Norway) +Chris Morrone (LLNL) +Pere Munt (Barcelona Supercomputer Center, Spain) +Michal Novotny (Masaryk University, Czech Republic) +Bryan O'Sullivan (Pathscale) +Gennaro Oliva (Institute of High Performance Computing and Networking, Italy) +Alejandro Lucero Palau (Barcelona Supercomputer Center, Spain) +Daniel Palermo (HP) +Dan 
Phung (LLNL/Columbia University) +Ashley Pittman (Quadrics, UK) +Vijay Ramasubramanian (University of Maryland) +Krishnakumar Ravi[KK] (HP) +Petter Reinholdtsen (University of Oslo, Norway) +Gerrit Renker (Swiss National Computer Centre) +Andy Riebs (HP) +Asier Roa (Barcelona Supercomputer Center, Spain) +Miguel Ros (Barcelona Supercomputer Center, Spain) +Beat Rubischon (DALCO AG, Switzerland) +Dan Rusak (Bull) +Eygene Ryabinkin (Kurchatov Institute, Russia) +Federico Sacerdoti (D.E. Shaw) +Rod Schultz (Bull) +Tyler Strickland (University of Florida) +Jeff Squyres (LAM MPI) +Prashanth Tamraparni (HP, India) +Jimmy Tang (Trinity College, Ireland) +Kevin Tew (LLNL/Bringham Young University) +Adam Todorski (Rensselaer Polytechnic Institute) +Nathan Weeks (Iowa State University) +Tim Wickberg (Rensselaer Polytechnic Institute) +Ramiro Brito Willmersdorf (Universidade Federal de Pemambuco, Brazil) +Jay Windley (Linux NetworX) +Anne-Marie Wunderlin (Bull) + +CODE-OCEC-09-009. All rights reserved. + +This file is part of Slurm, a resource management program. +For details, see . +Please also read the supplied file: DISCLAIMER. + +Slurm is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 2 of the License, or (at your option) +any later version. + +Slurm is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +details. + +You should have received a copy of the GNU General Public License along +with Slurm; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + +OUR NOTICE AND TERMS OF AND CONDITIONS OF THE GNU GENERAL PUBLIC LICENSE + +Our Preamble Notice + +Auspices + +This work performed under the auspices of the U.S. 
Department of Energy by +Lawrence Livermore National Laboratory under Contract DE-AC52-07NA27344. + +Disclaimer + +This work was sponsored by an agency of the United States government. +Neither the United States Government nor Lawrence Livermore National +Security, LLC, nor any of their employees, makes any warranty, express +or implied, or assumes any liability or responsibility for the accuracy, +completeness, or usefulness of any information, apparatus, product, or +process disclosed, or represents that its use would not infringe privately +owned rights. References herein to any specific commercial products, process, +or services by trade names, trademark, manufacturer or otherwise does not +necessarily constitute or imply its endorsement, recommendation, or +favoring by the United States Government or the Lawrence Livermore National +Security, LLC. The views and opinions of authors expressed herein do not +necessarily state or reflect those of the United States government or +Lawrence Livermore National Security, LLC, and shall not be used for +advertising or product endorsement purposes. + +The precise terms and conditions for copying, distribution and modification +is provided in the file named "COPYING" in this directory. diff --git a/pyslurm/slurm/SLURM_LICENSE b/pyslurm/slurm/SLURM_LICENSE new file mode 100644 index 00000000..0fd4db48 --- /dev/null +++ b/pyslurm/slurm/SLURM_LICENSE @@ -0,0 +1,389 @@ + SLURM LICENSE AGREEMENT + +All Slurm code and documentation is available under the GNU General Public +License. Some tools in the "contribs" directory have other licenses. See +the documentation for individual contributed tools for details. + +In addition, as a special exception, the copyright holders give permission +to link the code of portions of this program with the OpenSSL library under +certain conditions as described in each individual source file, and distribute +linked combinations including the two. 
You must obey the GNU General Public +License in all respects for all of the code used other than OpenSSL. If you +modify file(s) with this exception, you may extend this exception to your +version of the file(s), but you are not obligated to do so. If you do not +wish to do so, delete this exception statement from your version. If you +delete this exception statement from all source files in the program, then +also delete it here. + +NO WARRANTY: Because the program is licensed free of charge, there is no +warranty for the program. See section 11 below for full details. + +============================================================================= + +OUR NOTICE AND TERMS OF AND CONDITIONS OF THE GNU GENERAL PUBLIC LICENSE + +Auspices + +Portions of this work were performed under the auspices of the U.S. Department +of Energy by Lawrence Livermore National Laboratory under Contract +DE-AC52-07NA27344. + +Disclaimer + +This work was sponsored by an agency of the United States government. +Neither the United States Government nor Lawrence Livermore National +Security, LLC, nor any of their employees, makes any warranty, express +or implied, or assumes any liability or responsibility for the accuracy, +completeness, or usefulness of any information, apparatus, product, or +process disclosed, or represents that its use would not infringe privately +owned rights. References herein to any specific commercial products, process, +or services by trade names, trademark, manufacturer or otherwise does not +necessarily constitute or imply its endorsement, recommendation, or +favoring by the United States Government or the Lawrence Livermore National +Security, LLC. The views and opinions of authors expressed herein do not +necessarily state or reflect those of the United States government or +Lawrence Livermore National Security, LLC, and shall not be used for +advertising or product endorsement purposes. 
+ +============================================================================= + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. 
+ + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. 
The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. 
(Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. 
+ +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. 
diff --git a/pyslurm/slurm/__init__.pxd b/pyslurm/slurm/__init__.pxd index f1fbdd6f..f29bfc00 100644 --- a/pyslurm/slurm/__init__.pxd +++ b/pyslurm/slurm/__init__.pxd @@ -61,7 +61,6 @@ cdef extern from '' nogil: cdef extern from *: ctypedef struct slurm_job_credential ctypedef struct switch_jobinfo - ctypedef struct job_resources ctypedef struct select_jobinfo ctypedef struct select_nodeinfo ctypedef struct jobacctinfo diff --git a/pyslurm/slurm/extra.pxi b/pyslurm/slurm/extra.pxi index 50fccb23..0ccb0708 100644 --- a/pyslurm/slurm/extra.pxi +++ b/pyslurm/slurm/extra.pxi @@ -5,7 +5,6 @@ # For example: to communicate with the slurmctld directly in order # to retrieve the actual batch-script as a string. # - # https://github.com/SchedMD/slurm/blob/26abe9188ea8712ba1eab4a8eb6322851f06a108/src/common/slurm_persist_conn.h#L51 ctypedef enum persist_conn_type_t: PERSIST_TYPE_NONE = 0 @@ -23,6 +22,7 @@ ctypedef struct persist_msg_t: uint16_t msg_type ctypedef int (*_slurm_persist_conn_t_callback_proc) (void *arg, persist_msg_t *msg, buf_t **out_buffer, uint32_t *uid) + ctypedef void (*_slurm_persist_conn_t_callback_fini)(void *arg) # https://github.com/SchedMD/slurm/blob/26abe9188ea8712ba1eab4a8eb6322851f06a108/src/common/slurm_persist_conn.h#L66 @@ -116,7 +116,7 @@ ctypedef struct slurm_msg_t: # https://github.com/SchedMD/slurm/blob/fe82218def7b57f5ecda9222e80662ebbb6415f8/src/common/slurm_protocol_defs.c#L865 cdef extern void slurm_free_return_code_msg(return_code_msg_t *msg) -# https://github.com/SchedMD/slurm/blob/2d2e83674b59410a7ed8ab6fc8d8acfcfa8beaf9/src/common/slurm_protocol_api.c#L2401 +# https://github.com/SchedMD/slurm/blob/2d2e83674b59410a7ed8ab6fc8d8acfcfa8beaf9/src/common/slurm_protocol_api.c#L2401 cdef extern int slurm_send_recv_controller_msg(slurm_msg_t *request_msg, slurm_msg_t *response_msg, slurmdb_cluster_rec_t *working_cluster_rec) @@ -124,29 +124,58 @@ cdef extern int slurm_send_recv_controller_msg(slurm_msg_t *request_msg, # 
https://github.com/SchedMD/slurm/blob/fe82218def7b57f5ecda9222e80662ebbb6415f8/src/common/slurm_protocol_defs.c#L168 cdef extern void slurm_msg_t_init(slurm_msg_t *msg) +# https://github.com/SchedMD/slurm/blob/master/src/common/job_resources.h +ctypedef struct job_resources: + bitstr_t *core_bitmap + bitstr_t *core_bitmap_used + uint32_t cpu_array_cnt + uint16_t *cpu_array_value + uint32_t *cpu_array_reps + uint16_t *cpus + uint16_t *cpus_used + uint16_t *cores_per_socket + uint16_t cr_type + uint64_t *memory_allocated + uint64_t *memory_used + uint32_t nhosts + bitstr_t *node_bitmap + uint32_t node_req + char *nodes + uint32_t ncpus + uint32_t *sock_core_rep_count + uint16_t *sockets_per_node + uint16_t *tasks_per_node + uint16_t threads_per_core + uint8_t whole_node -# Global Environment +# +# TRES +# +ctypedef enum tres_types_t: + TRES_CPU = 1 + TRES_MEM + TRES_ENERGY + TRES_NODE + TRES_BILLING + TRES_FS_DISK + TRES_VMEM + TRES_PAGES + TRES_STATIC_CNT +# Global Environment cdef extern char **environ # # Slurm Memory routines +# We simply use the macros from xmalloc.h - more convenient # -cdef extern void slurm_xfree (void **) -cdef extern void *slurm_xcalloc(size_t, size_t, bool, bool, const char *, int, const char *) +cdef extern from "pyslurm/slurm/xmalloc.h" nogil: + void xfree(void *__p) + void *xmalloc(size_t __sz) + void *try_xmalloc(size_t __sz) -cdef inline xfree(void *__p): - slurm_xfree(&__p) - -cdef inline void *xmalloc(size_t __sz): - return slurm_xcalloc(1, __sz, True, False, __FILE__, __LINE__, __FUNCTION__) - -cdef inline void *try_xmalloc(size_t __sz): - return slurm_xcalloc(1, __sz, True, True, __FILE__, __LINE__, __FUNCTION__) - -cdef inline void xfree_ptr(void *__p): - slurm_xfree(&__p) +cdef extern void slurm_xfree_ptr(void *) # # Slurm xstring functions @@ -177,6 +206,16 @@ cdef extern void slurm_free_job_step_info_members(job_step_info_t *msg) cdef extern char *slurm_job_state_string(uint16_t inx) cdef extern char 
*slurm_job_reason_string(int inx) cdef extern char *slurm_job_share_string(uint16_t shared) +cdef extern void slurm_free_update_step_msg(step_update_request_msg_t *msg) + +# +# Slurm Node functions +# + +cdef extern int slurm_get_select_nodeinfo(dynamic_plugin_data_t *nodeinfo, select_nodedata_type data_type, node_states state, void *data) +cdef extern char *slurm_node_state_string_complete(uint32_t inx) +cdef extern void slurm_free_update_node_msg(update_node_msg_t *msg) +cdef extern void slurm_free_node_info_members(node_info_t *node) # # Slurm environment functions @@ -191,6 +230,7 @@ cdef extern void slurm_env_array_free(char **env_array) # cdef extern char *slurm_preempt_mode_string (uint16_t preempt_mode) +cdef extern uint16_t slurm_preempt_mode_num (const char *preempt_mode) cdef extern char *slurm_node_state_string (uint32_t inx) cdef extern char *slurm_step_layout_type_name (task_dist_states_t task_dist) cdef extern char *slurm_reservation_flags_string (reserve_info_t *resv_ptr) @@ -199,3 +239,35 @@ cdef extern int slurm_addto_char_list_with_case(List char_list, char *names, boo cdef extern int slurm_addto_step_list(List step_list, char *names) cdef extern int slurmdb_report_set_start_end_time(time_t *start, time_t *end) cdef extern uint16_t slurm_get_track_wckey() +cdef extern void slurm_sprint_cpu_bind_type(char *str, cpu_bind_type_t cpu_bind_type) + +# Slurm bit functions + +cdef extern bitstr_t *slurm_bit_alloc(bitoff_t nbits) +cdef extern void slurm_bit_set(bitstr_t *b, bitoff_t bit) +cdef extern int slurm_bit_test(bitstr_t *b, bitoff_t bit) +cdef extern char *slurm_bit_fmt(char *str, int32_t len, bitstr_t *b) +cdef extern void slurm_bit_free(bitstr_t **b) + + +cdef extern from *: + """ + #define bit_free(__b) slurm_bit_free((bitstr_t **)&(__b)) + #define FREE_NULL_BITMAP(_X) \ + do { \ + if (_X) \ + bit_free(_X); \ + _X = NULL; \ + } while(0) \ + """ + void bit_free(bitstr_t *_X) + void FREE_NULL_BITMAP(bitstr_t *_X) + +cdef extern char 
*slurm_hostlist_deranged_string_malloc(hostlist_t hl) + +# +# Slurmdbd functions +# + +cdef extern void slurmdb_job_cond_def_start_end(slurmdb_job_cond_t *job_cond) +cdef extern uint64_t slurmdb_find_tres_count_in_string(char *tres_str_in, int id) diff --git a/pyslurm/slurm/xmalloc.h b/pyslurm/slurm/xmalloc.h new file mode 100644 index 00000000..f1db7b5f --- /dev/null +++ b/pyslurm/slurm/xmalloc.h @@ -0,0 +1,117 @@ +/*****************************************************************************\ + * xmalloc.h - enhanced malloc routines for slurm + * - default: never return if errors are encountered. + * - attempt to report file, line, and calling function on assertion failure + * - use configurable slurm log facility for reporting errors + ***************************************************************************** + * Copyright (C) 2002 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Jim Garlick and + * Mark Grondona + * CODE-OCEC-09-009. All rights reserved. + * + * This file is part of Slurm, a resource management program. + * For details, see . + * Please also read the included file: DISCLAIMER. + * + * Slurm is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. 
If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * Slurm is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with Slurm; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + ***************************************************************************** + * Description: + * + * void *xmalloc(size_t size); + * void xrealloc(void *p, size_t newsize); + * void xfree(void *p); + * int xsize(void *p); + * + * xmalloc(size) allocates size bytes and returns a pointer to the allocated + * memory. The memory is set to zero. xmalloc() will not return unless + * there are no errors. The memory must be freed using xfree(). + * + * xrealloc(p, newsize) changes the size of the block pointed to by p to the + * value of newsize. Newly allocated memory is zeroed. If p is NULL, + * xrealloc() performs the same function as `p = xmalloc(newsize)'. If p + * is not NULL, it is required to have been initialized with a call to + * [try_]xmalloc() or [try_]xrealloc(). + * + * xfree(p) frees the memory block pointed to by p. The memory must have been + * initialized with a call to [try_]xmalloc() or [try_]xrealloc(). + * + * xsize(p) returns the current size of the memory allocation pointed to by + * p. The memory must have been allocated with [try_]xmalloc() or + * [try_]xrealloc(). 
+ * +\*****************************************************************************/ + +#ifndef _XMALLOC_H +#define _XMALLOC_H + +#include +#include + +#define xcalloc(__cnt, __sz) \ + slurm_xcalloc(__cnt, __sz, true, false, __FILE__, __LINE__, __func__) + +#define try_xcalloc(__cnt, __sz) \ + slurm_xcalloc(__cnt, __sz, true, true, __FILE__, __LINE__, __func__) + +#define xcalloc_nz(__cnt, __sz) \ + slurm_xcalloc(__cnt, __sz, false, false, __FILE__, __LINE__, __func__) + +#define xmalloc(__sz) \ + slurm_xcalloc(1, __sz, true, false, __FILE__, __LINE__, __func__) + +#define try_xmalloc(__sz) \ + slurm_xcalloc(1, __sz, true, true, __FILE__, __LINE__, __func__) + +#define xmalloc_nz(__sz) \ + slurm_xcalloc(1, __sz, false, false, __FILE__, __LINE__, __func__) + +#define xfree(__p) slurm_xfree((void **)&(__p)) + +#define xfree_array(__p) slurm_xfree_array((void ***)&(__p)) + +#define xrecalloc(__p, __cnt, __sz) \ + slurm_xrecalloc((void **)&(__p), __cnt, __sz, true, false, __FILE__, __LINE__, __func__) + +#define xrealloc(__p, __sz) \ + slurm_xrecalloc((void **)&(__p), 1, __sz, true, false, __FILE__, __LINE__, __func__) + +#define try_xrealloc(__p, __sz) \ + slurm_xrecalloc((void **)&(__p), 1, __sz, true, true, __FILE__, __LINE__, __func__) + +#define xrealloc_nz(__p, __sz) \ + slurm_xrecalloc((void **)&(__p), 1, __sz, false, false, __FILE__, __LINE__, __func__) + +void *slurm_xcalloc(size_t, size_t, bool, bool, const char *, int, const char *); +void slurm_xfree(void **); +void slurm_xfree_array(void ***); +void *slurm_xrecalloc(void **, size_t, size_t, bool, bool, const char *, int, const char *); + +size_t xsize(void *item); + +void xfree_ptr(void *); + +#endif /* !_XMALLOC_H */ diff --git a/setup.cfg b/setup.cfg index 17a6e9f3..78d52108 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,3 +1,9 @@ +[options] +packages = find: + +[options.packages.find] +include = pyslurm, pyslurm.* + [bdist_rpm] release = 1 packager = Giovanni Torres diff --git a/setup.py b/setup.py index 
796faa6a..7b96fdc8 100644 --- a/setup.py +++ b/setup.py @@ -33,7 +33,6 @@ url="https://github.com/PySlurm/pyslurm", platforms=["Linux"], keywords=["HPC", "Batch Scheduler", "Resource Manager", "Slurm", "Cython"], - packages=["pyslurm"], classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Console", diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py new file mode 100644 index 00000000..bf70149c --- /dev/null +++ b/tests/integration/conftest.py @@ -0,0 +1,44 @@ +######################################################################### +# conftest.py - pytest fixtures +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +import pytest +from pyslurm import ( + Job, + JobSubmitDescription, +) +from util import create_simple_job_desc + + +@pytest.fixture +def submit_job(): + + jobs = [] + def _job(script=None, **kwargs): + job_desc = create_simple_job_desc(script, **kwargs) + job = Job(job_desc.submit()) + + jobs.append(job) + return job + + yield _job + + for j in jobs: + j.cancel() diff --git a/tests/integration/test_db_connection.py b/tests/integration/test_db_connection.py new file mode 100644 index 00000000..876ec63d --- /dev/null +++ b/tests/integration/test_db_connection.py @@ -0,0 +1,56 @@ +######################################################################### +# test_db_connection.py - database connection api integration tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+"""test_db_connection.py - Test database connection api functionalities.""" + +import pytest +import pyslurm + + +def test_create_instance(): + with pytest.raises(RuntimeError): + pyslurm.db.Connection() + + +def test_open(): + conn = pyslurm.db.Connection.open() + assert conn.is_open + + +def test_close(): + conn = pyslurm.db.Connection.open() + assert conn.is_open + + conn.close() + assert not conn.is_open + # no-op + conn.close() + + +def test_commit(): + conn = pyslurm.db.Connection.open() + assert conn.is_open + conn.commit() + + +def test_rollback(): + conn = pyslurm.db.Connection.open() + assert conn.is_open + conn.rollback() diff --git a/tests/integration/test_db_job.py b/tests/integration/test_db_job.py new file mode 100644 index 00000000..2c84ef4f --- /dev/null +++ b/tests/integration/test_db_job.py @@ -0,0 +1,100 @@ +######################################################################### +# test_db_job.py - database job api integration tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+"""test_db_job.py - Unit test database job api functionalities.""" + +import pytest +import pyslurm +import time +import util + + +# TODO: Instead of submitting new Jobs and waiting to test Database API +# functionality, we could just fill a slurm database with data on a host, then +# dump the slurm_acct_db to a SQL file and import it in the test environment +# before the integration tests are ran. +# Just a few Jobs and other stuff is enough to keep it small, so it could also +# be put in the repository and uploaded to github. + + +def test_load_single(submit_job): + job = submit_job() + util.wait() + db_job = pyslurm.db.Job.load(job.id) + + assert db_job.id == job.id + + with pytest.raises(pyslurm.RPCError): + pyslurm.db.Job.load(1000) + + +def test_parse_all(submit_job): + job = submit_job() + util.wait() + db_job = pyslurm.db.Job.load(job.id) + job_dict = db_job.as_dict() + + assert job_dict["stats"] + assert job_dict["steps"] + + +def test_modify(submit_job): + # TODO + pass + + +def test_if_steps_exist(submit_job): + # TODO + pass + + +def test_load_with_filter_node(submit_job): + # TODO + pass + + +def test_load_with_filter_qos(submit_job): + # TODO + pass + + +def test_load_with_filter_cluster(submit_job): + # TODO + pass + + +def test_load_with_filter_multiple(submit_job): + # TODO + pass + + +def test_load_with_script(submit_job): + script = util.create_job_script() + job = submit_job(script=script) + util.wait(5) + db_job = pyslurm.db.Job.load(job.id, with_script=True) + assert db_job.script == script + + +def test_load_with_env(submit_job): + job = submit_job() + util.wait(5) + db_job = pyslurm.db.Job.load(job.id, with_env=True) + assert db_job.environment diff --git a/tests/integration/test_db_qos.py b/tests/integration/test_db_qos.py new file mode 100644 index 00000000..5bbd69e4 --- /dev/null +++ b/tests/integration/test_db_qos.py @@ -0,0 +1,55 @@ +######################################################################### +# test_db_qos.py - database 
qos api integration tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +"""test_db_qos.py - Integration test database qos api functionalities.""" + +import pytest +import pyslurm +import time +import util + + +def test_load_single(): + qos = pyslurm.db.QualityOfService.load("normal") + + assert qos.name == "normal" + assert qos.id == 1 + + with pytest.raises(pyslurm.RPCError): + pyslurm.db.QualityOfService.load("qos_non_existent") + + +def test_parse_all(submit_job): + qos = pyslurm.db.QualityOfService.load("normal") + qos_dict = qos.as_dict() + + assert qos_dict + assert qos_dict["name"] == qos.name + + +def test_load_all(): + qos = pyslurm.db.QualitiesOfService.load() + assert qos + + +def test_load_with_filter_name(): + qfilter = pyslurm.db.QualityOfServiceSearchFilter(names=["non_existent"]) + qos = pyslurm.db.QualitiesOfService.load(qfilter) + assert not qos diff --git a/tests/integration/test_job.py b/tests/integration/test_job.py new file mode 100644 index 00000000..15c4bdef --- /dev/null +++ b/tests/integration/test_job.py @@ -0,0 +1,162 @@ +######################################################################### +# test_job.py - job api integration tests 
+######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +"""test_job.py - Integration test job api functionalities.""" + +import time +import pytest +import pyslurm +import util +from util import create_simple_job_desc +from pyslurm import ( + Job, + Jobs, + JobSubmitDescription, + RPCError, +) + + +def test_parse_all(submit_job): + job = submit_job() + # Use the as_dict() function to test if parsing works for all + # properties on a simple Job without error. + Job.load(job.id).as_dict() + + +def test_load(submit_job): + job = submit_job() + jid = job.id + + # Nothing has been loaded at this point, just make sure everything is + # on default values. 
+ assert job.ntasks == 1 + assert job.cpus_per_task == 1 + assert job.time_limit == None + + # Now load the job info + job = Job.load(jid) + + assert job.id == jid + assert job.ntasks == 2 + assert job.cpus_per_task == 3 + assert job.time_limit == 1440 + + with pytest.raises(RPCError): + Job.load(99999) + + +def test_cancel(submit_job): + job = submit_job() + job.cancel() + # make sure the job is actually cancelled + time.sleep(util.WAIT_SECS_SLURMCTLD) + assert Job.load(job.id).state == "CANCELLED" + + +def test_send_signal(submit_job): + job = submit_job() + + time.sleep(util.WAIT_SECS_SLURMCTLD) + assert Job.load(job.id).state == "RUNNING" + + # Send a SIGKILL (basically cancelling the Job) + job.send_signal(9) + + # make sure the job is actually cancelled + time.sleep(util.WAIT_SECS_SLURMCTLD) + assert Job.load(job.id).state == "CANCELLED" + + +def test_suspend_unsuspend(submit_job): + job = submit_job() + + time.sleep(util.WAIT_SECS_SLURMCTLD) + job.suspend() + assert Job.load(job.id).state == "SUSPENDED" + + job.unsuspend() + # make sure the job is actually running again + time.sleep(util.WAIT_SECS_SLURMCTLD) + assert Job.load(job.id).state == "RUNNING" + + +# Don't need to test hold/resume, since it uses just job.modify() to set +# priority to 0/INFINITE. 
+def test_modify(submit_job): + job = submit_job(priority=0) + job = Job(job.id) + + changes = JobSubmitDescription( + time_limit = "2-00:00:00", + ntasks = 5, + cpus_per_task = 4, + ) + + job.modify(changes) + job = Job.load(job.id) + + assert job.time_limit == 2880 + assert job.ntasks == 5 + assert job.cpus_per_task == 4 + + +def test_requeue(submit_job): + job = submit_job() + job = Job.load(job.id) + + assert job.requeue_count == 0 + + time.sleep(util.WAIT_SECS_SLURMCTLD) + job.requeue() + job = Job.load(job.id) + + assert job.requeue_count == 1 + + +def test_notify(submit_job): + job = submit_job() + time.sleep(util.WAIT_SECS_SLURMCTLD) + + # Could check the logfile, but we just assume for now + # that when this function raises no Exception, everything worked. + job.notify("Hello Friends!") + + +def test_get_batch_script(submit_job): + script_body = create_simple_job_desc().script + job = submit_job() + + assert script_body == job.get_batch_script() + + +def test_get_job_queue(submit_job): + # Submit 10 jobs, gather the job_ids in a list + job_list = [submit_job() for i in range(10)] + + jobs = Jobs.load() + for job in job_list: + # Check to see if all the Jobs we submitted exist + assert job.id in jobs + assert isinstance(jobs[job.id], Job) + + +def test_get_resource_layout_per_node(submit_job): + # TODO + assert True diff --git a/tests/integration/test_job_steps.py b/tests/integration/test_job_steps.py new file mode 100644 index 00000000..4ad2de39 --- /dev/null +++ b/tests/integration/test_job_steps.py @@ -0,0 +1,180 @@ +######################################################################### +# test_job_steps.py - job steps api integration tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software 
Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +"""test_job_steps.py - Test the job steps api functions.""" + +import pytest +import time +from pyslurm import ( + JobStep, + JobSteps, + RPCError, +) +import util + + +def create_job_script_multi_step(steps=None): + default = f""" + srun -n1 -N1 -c2 \ + -J step_zero --distribution=block:cyclic:block,Pack \ + sleep 300 & + srun -n1 -N1 -c3 \ + -t 10 -J step_one --distribution=block:cyclic:block,Pack \ + sleep 300 &""" + + job_script = f"""\ +#!/bin/bash + +echo "Got args: $@" + +/usr/bin/env + +{default if steps is None else steps} +wait +""" + return job_script + + +def test_load(submit_job): + job = submit_job(script=create_job_script_multi_step()) + + # Load the step info, waiting one second to make sure the Step + # actually exists. + time.sleep(util.WAIT_SECS_SLURMCTLD) + step = JobStep.load(job.id, "batch") + + assert step.id == "batch" + assert step.job_id == job.id + assert step.name == "batch" + # Job was submitted with ntasks=2, but the batch step always has just 1. + assert step.ntasks == 1 + # Job was submitted with a time-limit of 1 day, but it seems this doesn't + # propagate through for the steps if not set explicitly. + assert step.time_limit is None + + # Now try to load the first and second Step started by srun + step_zero = JobStep.load(job, 0) + step_one = JobStep.load(job, 1) + + # It is possible that the srun executed as the second command will + # become the Step with ID '0' - so we just swap it. 
+ if step_zero.name == "step_one": + tmp = step_zero + step_zero = step_one + step_one = tmp + + assert step_one.id == 0 + assert step_zero.id == 1 + + step = step_zero + assert step.job_id == job.id + assert step.name == "step_zero" + assert step.ntasks == 1 + assert step.alloc_cpus == 2 + assert step.time_limit is None + + step = step_one + assert step.job_id == job.id + assert step.name == "step_one" + assert step.ntasks == 1 + assert step.alloc_cpus == 3 + assert step.time_limit == 10 + + +def test_collection(submit_job): + job = submit_job(script=create_job_script_multi_step()) + + time.sleep(util.WAIT_SECS_SLURMCTLD) + steps = JobSteps.load(job) + + assert steps != {} + # We have 3 Steps: batch, 0 and 1 + assert len(steps) == 3 + assert ("batch" in steps and + 0 in steps and + 1 in steps) + + +def test_cancel(submit_job): + job = submit_job(script=create_job_script_multi_step()) + + time.sleep(util.WAIT_SECS_SLURMCTLD) + steps = JobSteps.load(job) + assert len(steps) == 3 + assert ("batch" in steps and + 0 in steps and + 1 in steps) + + steps[0].cancel() + + time.sleep(util.WAIT_SECS_SLURMCTLD) + steps = JobSteps.load(job) + assert len(steps) == 2 + assert ("batch" in steps and + 1 in steps) + + +def test_modify(submit_job): + steps = "srun -t 20 sleep 100" + job = submit_job(script=create_job_script_multi_step(steps)) + + time.sleep(util.WAIT_SECS_SLURMCTLD) + step = JobStep.load(job, 0) + assert step.time_limit == 20 + + step.modify(JobStep(time_limit="00:05:00")) + assert JobStep.load(job, 0).time_limit == 5 + + step.modify(time_limit="00:15:00") + assert JobStep.load(job, 0).time_limit == 15 + + +def test_send_signal(submit_job): + steps = "srun -t 10 sleep 100" + job = submit_job(script=create_job_script_multi_step(steps)) + + time.sleep(util.WAIT_SECS_SLURMCTLD) + step = JobStep.load(job, 0) + assert step.state == "RUNNING" + + # Send a SIGTERM (basically cancelling the Job) + step.send_signal(15) + + # Make sure the job is actually cancelled. 
+ # If a RPCError is raised, this means the Step got cancelled. + time.sleep(util.WAIT_SECS_SLURMCTLD) + with pytest.raises(RPCError): + step = JobStep.load(job, 0) + + +def test_load_with_wrong_step_id(submit_job): + job = submit_job() + + with pytest.raises(RPCError): + JobStep.load(job, 3) + + +def test_parse_all(submit_job): + job = submit_job() + + # Use the as_dict() function to test if parsing works for all + # properties on a simple JobStep without error. + time.sleep(util.WAIT_SECS_SLURMCTLD) + JobStep.load(job, "batch").as_dict() diff --git a/tests/integration/test_job_submit.py b/tests/integration/test_job_submit.py new file mode 100644 index 00000000..d2f7c98b --- /dev/null +++ b/tests/integration/test_job_submit.py @@ -0,0 +1,43 @@ +######################################################################### +# test_job_submit.py - job submit api integration tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+"""test_job_submit.py - Test the job submit api functions.""" + +import time +import pytest +import pyslurm +from os import environ as pyenviron +from util import create_simple_job_desc, create_job_script +from pyslurm import ( + Job, + Jobs, + JobSubmitDescription, + RPCError, +) + + +def test_submit_example1(): + # TODO + assert True + + +def test_submit_example2(): + # TODO + assert True diff --git a/tests/integration/test_node.py b/tests/integration/test_node.py new file mode 100644 index 00000000..3e1306da --- /dev/null +++ b/tests/integration/test_node.py @@ -0,0 +1,72 @@ +######################################################################### +# test_node.py - node api integration tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+"""test_node.py - Test the node api functions.""" + +import sys +import time +import pytest +import pyslurm +import os +from pyslurm import Node, Nodes, RPCError + + +def test_load(): + name = Nodes.load().as_list()[0].name + + # Now load the node info + node = Node.load(name) + assert node.name == name + assert node.weight is not None + assert node.slurm_version is not None + + with pytest.raises(RPCError, + match=f"Node 'nonexistent' does not exist"): + Node.load("nonexistent") + + +def test_create(): + node = Node("testhostpyslurm") + node.create() + + with pytest.raises(RPCError, + match=f"Invalid node state specified"): + Node("testhostpyslurm2").create("idle") + + +# def test_delete(): +# node = Node("testhost1").delete() + + +def test_modify(): + node = Node(Nodes.load().as_list()[0].name) + + node.modify(weight=10000) + assert Node.load(node.name).weight == 10000 + + node.modify(Node(weight=20000)) + assert Node.load(node.name).weight == 20000 + + node.modify(Node(weight=5000)) + assert Node.load(node.name).weight == 5000 + + +def test_parse_all(): + Node.load(Nodes.load().as_list()[0].name).as_dict() diff --git a/tests/integration/util.py b/tests/integration/util.py new file mode 100644 index 00000000..f5032f1a --- /dev/null +++ b/tests/integration/util.py @@ -0,0 +1,65 @@ +######################################################################### +# util.py - utility functions for tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+ +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import pytest +from pyslurm import ( + Job, + JobSubmitDescription, +) +import time + +# Horrendous, but works for now, because when testing against a real slurmctld +# we need to wait a bit for state changes (i.e. we cancel a job and +# immediately check after if the state is really "CANCELLED", but the state +# hasn't changed yet, so we need to wait a bit) +WAIT_SECS_SLURMCTLD = 3 + + +def wait(secs=WAIT_SECS_SLURMCTLD): + time.sleep(secs) + + +def create_job_script(): + job_script = """\ +#!/bin/bash + +echo "Got args: $@" + +/usr/bin/env + +sleep 500\ + +""" + return job_script + + +def create_simple_job_desc(script=None, **kwargs): + job = JobSubmitDescription(**kwargs) + + job.name = "test_job" + job.standard_output = "/tmp/slurm-test-%j.out" + job.memory_per_cpu = "1G" + job.ntasks = 2 + job.cpus_per_task = 3 + job.script = create_job_script() if not script else script + job.time_limit = "1-00:00:00" + + return job diff --git a/tests/unit/test_common.py b/tests/unit/test_common.py new file mode 100644 index 00000000..ca3f1cfd --- /dev/null +++ b/tests/unit/test_common.py @@ -0,0 +1,395 @@ +######################################################################### +# test_common.py - common utility tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software 
Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +"""test_common.py - Test the most commonly used helper functions.""" + +import pyslurm +import pytest +import datetime +from pyslurm import Job, JobSubmitDescription, Node +from pyslurm.core.common.ctime import ( + timestr_to_mins, + timestr_to_secs, + mins_to_timestr, + secs_to_timestr, + date_to_timestamp, + timestamp_to_date, +) +from pyslurm.core.common.uint import ( + u8, + u16, + u32, + u64, + u8_parse, + u16_parse, + u32_parse, + u64_parse, +) +from pyslurm.core.common import ( + uid_to_name, + gid_to_name, + user_to_uid, + group_to_gid, + expand_range_str, + humanize, + dehumanize, + signal_to_num, + cpubind_to_num, + nodelist_from_range_str, + nodelist_to_range_str, + _sum_prop, +) +from pyslurm.core.common import cstr + + +class TestTypes: + + def test_strings(self): + n = Node() + + n.name = "Testing fmalloc string routines." + assert n.name == "Testing fmalloc string routines." 
+ + n.name = None + assert n.name is None + + # Everything after a \0 will be cut off + n.name = "test1\0test2" + assert n.name == "test1" + + n.name = "\0" + assert n.name is None + + def test_lists(self): + n = Node() + input_as_list = ["test1", "test2", "test3", "test4"] + input_as_str = ",".join(input_as_list) + + n.available_features = input_as_list + assert n.available_features == input_as_list + + n.available_features = input_as_str + assert n.available_features == input_as_list + + n.available_features = [] + assert n.available_features == [] + + n.available_features = "" + assert n.available_features == [] + + n.available_features = None + assert n.available_features == [] + + def test_str_to_dict(self): + expected_dict = {"key1": "value1", "key2": "value2"} + input_str = "key1=value1,key2=value2" + assert cstr.to_dict(input_str) == expected_dict + assert cstr.to_dict("") == {} + + def test_dict_to_str(self): + input_dict = {"key1": "value1", "key2": "value2"} + expected_str = "key1=value1,key2=value2" + assert cstr.dict_to_str(input_dict) == expected_str + + input_dict = {"key1": "value1", "key2": "value2"} + expected_str = "key1=value1,key2=value2" + assert cstr.dict_to_str(input_dict) == expected_str + + expected_str = "key1-value1:key2-value2" + assert cstr.dict_to_str(input_dict, delim1=":", delim2="-") == expected_str + + input_dict = {"key1=": "value1", "key2": "value2"} + expected_str = "key1=value1,key2=value2" + with pytest.raises(ValueError, + match=r"Key or Value cannot contain either*"): + assert cstr.dict_to_str(input_dict) == expected_str + + expected_str = "key1=value1,key2=value2" + assert cstr.dict_to_str(expected_str) == expected_str + + assert cstr.dict_to_str({}) == None + assert cstr.dict_to_str("") == None + + def test_dict_to_gres_str(self): + input_dict = {"gpu:tesla": 3} + expected_str = "gres:gpu:tesla:3" + assert cstr.from_gres_dict(input_dict) == expected_str + assert cstr.from_gres_dict(expected_str) == expected_str + + 
input_dict = {"gpu": 3} + expected_str = "gres:gpu:3" + assert cstr.from_gres_dict(input_dict) == expected_str + assert cstr.from_gres_dict(expected_str) == expected_str + + def test_str_to_gres_dict(self): + assert True + + def _uint_impl(self, func_set, func_get, typ): + val = func_set(2**typ-2) + assert func_get(val) == None + + val = func_set(None) + assert func_get(val) == None + + val = func_set(str(2**typ-2)) + assert func_get(val) == None + + val = func_set("unlimited", inf=True) + assert func_get(val) == "unlimited" + + val = func_set(0) + assert func_get(val) == None + + val = func_set(0, zero_is_noval=False) + assert func_get(val, zero_is_noval=False) == 0 + + with pytest.raises(TypeError, + match="an integer is required"): + val = func_set("unlimited") + + with pytest.raises(OverflowError, + match=r"can't convert negative value to*"): + val = func_set(-1) + + with pytest.raises(OverflowError, + match=r"value too large to convert to*|" + "Python int too large*"): + val = func_set(2**typ) + + def test_u8(self): + self._uint_impl(u8, u8_parse, 8) + + def test_u16(self): + self._uint_impl(u16, u16_parse, 16) + + def test_u32(self): + self._uint_impl(u32, u32_parse, 32) + + def test_u64(self): + self._uint_impl(u64, u64_parse, 64) + +# def _uint_bool_impl(self, arg): +# js = JobSubmitDescription() + +# setattr(js, arg, True) +# assert getattr(js, arg) == True + +# setattr(js, arg, False) +# assert getattr(js, arg) == False + +# # Set to true again to make sure toggling actually works. 
+# setattr(js, arg, True) +# assert getattr(js, arg) == True + +# setattr(js, arg, None) +# assert getattr(js, arg) == False + +# def test_u8_bool(self): +# self._uint_bool_impl("overcommit") + +# def test_u16_bool(self): +# self._uint_bool_impl("requires_contiguous_nodes") + +# def test_u64_bool_flag(self): +# self._uint_bool_impl("kill_on_invalid_dependency") + + +class TestTime: + + def test_parse_minutes(self): + mins = 60 + mins_str = "01:00:00" + + assert timestr_to_mins(mins_str) == mins + assert timestr_to_mins("unlimited") == 2**32-1 + assert timestr_to_mins(None) == 2**32-2 + + assert mins_to_timestr(mins) == mins_str + assert mins_to_timestr(2**32-1) == "unlimited" + assert mins_to_timestr(2**32-2) == None + assert mins_to_timestr(0) == None + + with pytest.raises(ValueError, + match="Invalid Time Specification: invalid_val."): + timestr_to_mins("invalid_val") + + def test_parse_seconds(self): + secs = 3600 + secs_str = "01:00:00" + + assert timestr_to_secs(secs_str) == secs + assert timestr_to_secs("unlimited") == 2**32-1 + assert timestr_to_secs(None) == 2**32-2 + + assert secs_to_timestr(secs) == secs_str + assert secs_to_timestr(2**32-1) == "unlimited" + assert secs_to_timestr(2**32-2) == None + assert secs_to_timestr(0) == None + + with pytest.raises(ValueError, + match="Invalid Time Specification: invalid_val."): + timestr_to_secs("invalid_val") + + def test_parse_date(self): + timestamp = 1667941697 + date = "2022-11-08T21:08:17" + datetime_date = datetime.datetime(2022, 11, 8, 21, 8, 17) + + # Converting date str to timestamp with the slurm API functions may + # not yield the expected timestamp above due to using local time zone + assert date_to_timestamp(date) == timestamp + assert date_to_timestamp(timestamp) == timestamp + assert date_to_timestamp(datetime_date) == timestamp + + assert timestamp_to_date(timestamp) == date + assert timestamp_to_date(0) == None + assert timestamp_to_date(2**32-1) == None + assert timestamp_to_date(2**32-2) == 
None + + with pytest.raises(ValueError, + match="Invalid Time Specification: 2022-11-08T21"): + date_to_timestamp("2022-11-08T21") + +class TestMiscUtil: + + def test_parse_uid(self): + name = uid_to_name(0) + assert name == "root" + + lookup = {0: "root"} + name = uid_to_name(0, lookup=lookup) + assert name == "root" + + uid = user_to_uid("root") + assert uid == 0 + + with pytest.raises(KeyError): + name = uid_to_name(2**32-5) + + with pytest.raises(KeyError): + name = user_to_uid("invalid_user") + + def test_parse_gid(self): + name = gid_to_name(0) + assert name == "root" + + lookup = {0: "root"} + name = gid_to_name(0, lookup=lookup) + assert name == "root" + + gid = group_to_gid("root") + assert gid == 0 + + with pytest.raises(KeyError): + name = gid_to_name(2**32-5) + + with pytest.raises(KeyError): + name = group_to_gid("invalid_group") + + def test_expand_range_str(self): + r = expand_range_str("1-5,6,7,10-11") + assert r == [1, 2, 3, 4, 5, 6, 7, 10, 11] + + def test_humanize(self): + val = humanize(1024) + assert val == "1.0G" + + val = humanize(2**20) + assert val == "1.0T" + + val = humanize(800) + assert val == "800.0M" + + val = humanize("unlimited") + assert val == "unlimited" + + val = humanize(None) + assert val == None + + with pytest.raises(ValueError): + val = humanize("invalid_val") + + def test_dehumanize(self): + # Note: default target unit for dehumanize is "M". 
+ val = dehumanize(1024) + assert val == 1024 + + val = dehumanize("2M") + assert val == 2 + + val = dehumanize("10G") + assert val == 10240 + + val = dehumanize("9.6G") + assert val == round(1024*9.6) + + val = dehumanize("10T") + assert val == 10*(2**20) + + val = dehumanize("10T", target="G") + assert val == 10*(2**10) + + with pytest.raises(ValueError, + match="Invalid value specified: 10L"): + val = dehumanize("10L") + + with pytest.raises(ValueError, + match="could not convert string to float: 'invalid_val'"): + val = dehumanize("invalid_valM") + + def test_signal_to_num(self): + sig = signal_to_num("SIGKILL") + assert sig == 9 + + sig = signal_to_num(7) + assert sig == 7 + + with pytest.raises(ValueError): + sig = signal_to_num("invalid_sig") + + def test_nodelist_from_range_str(self): + nodelist = ["node001", "node007", "node008", "node009"] + nodelist_str = ",".join(nodelist) + assert nodelist == nodelist_from_range_str("node[001,007-009]") + assert nodelist_from_range_str("node[001,007:009]") == [] + + def test_nodelist_to_range_str(self): + nodelist = ["node001", "node007", "node008", "node009"] + nodelist_str = ",".join(nodelist) + assert "node[001,007-009]" == nodelist_to_range_str(nodelist) + assert "node[001,007-009]" == nodelist_to_range_str(nodelist_str) + + def test_summarize_property(self): + class TestObject: + @property + def memory(self): + return 10240 + + @property + def cpus(self): + return None + + object_dict = {i: TestObject() for i in range(10)} + + expected = 10240 * 10 + assert _sum_prop(object_dict, TestObject.memory) == expected + + expected = 0 + assert _sum_prop(object_dict, TestObject.cpus) == 0 diff --git a/tests/unit/test_db_job.py b/tests/unit/test_db_job.py new file mode 100644 index 00000000..43ea5227 --- /dev/null +++ b/tests/unit/test_db_job.py @@ -0,0 +1,52 @@ +######################################################################### +# test_db_job.py - database job unit tests 
+######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +"""test_db_job.py - Unit test basic database job functionalities.""" + +import pytest +import pyslurm + + +def test_search_filter(): + job_filter = pyslurm.db.JobSearchFilter() + + job_filter.clusters = ["test1"] + job_filter.partitions = ["partition1", "partition2"] + job_filter._create() + + job_filter.ids = [1000, 1001] + job_filter._create() + + job_filter.with_script = True + job_filter._create() + + job_filter.with_env = True + with pytest.raises(ValueError): + job_filter._create() + + +def test_collection_init(): + # TODO + assert True + + +def test_create_instance(): + job = pyslurm.db.Job(9999) + assert job.id == 9999 diff --git a/tests/unit/test_db_qos.py b/tests/unit/test_db_qos.py new file mode 100644 index 00000000..acf12fea --- /dev/null +++ b/tests/unit/test_db_qos.py @@ -0,0 +1,49 @@ +######################################################################### +# test_db_qos.py - database qos unit tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify 
+# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +"""test_db_qos.py - Unit test basic database qos functionalities.""" + +import pytest +import pyslurm + + +def test_search_filter(): + qos_filter = pyslurm.db.QualityOfServiceSearchFilter() + qos_filter._create() + + qos_filter.ids = [1, 2] + qos_filter._create() + + qos_filter.preempt_modes = ["cluster"] + qos_filter._create() + + with pytest.raises(ValueError): + qos_filter.preempt_modes = ["invalid_preempt_mode"] + qos_filter._create() + + +def test_create_collection_instance(): + # TODO + assert True + + +def test_create_instance(): + qos = pyslurm.db.QualityOfService("test") + assert qos.name == "test" diff --git a/tests/unit/test_db_slurm_list.py b/tests/unit/test_db_slurm_list.py new file mode 100644 index 00000000..41df371c --- /dev/null +++ b/tests/unit/test_db_slurm_list.py @@ -0,0 +1,134 @@ +######################################################################### +# test_db_slurm_list.py - Slurm list tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+
+# PySlurm is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with PySlurm; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""test_db_slurm_list.py - Unit test basic Slurm list functionalities."""
+
+import pytest
+import pyslurm
+from pyslurm.core.db.util import SlurmList
+
+
+def test_create_and_destroy_list():
+    slist = SlurmList()
+    assert not slist.is_null
+
+    slist2 = SlurmList(["user1", "user2"])
+    assert not slist2.is_null
+    assert slist2.cnt == 2
+    assert slist2.itr_cnt == 0
+    assert slist2.is_itr_null
+
+    slist2._dealloc_itr()
+    slist2._dealloc_list()
+    assert slist2.is_null
+
+
+def test_append():
+    slist = SlurmList()
+    input_list = ["user1", "user2", "user3"]
+    slist.append(input_list)
+    assert slist.cnt == len(input_list)
+
+    input_str = "user4"
+    slist.append(input_str)
+    assert slist.cnt == 4
+
+    input_int = 10
+    slist.append(input_int)
+    assert slist.cnt == 5
+
+    input_ignore_none = ["user6", None]
+    slist.append(input_ignore_none)
+    assert slist.cnt == 6
+
+
+def test_convert_to_pylist():
+    input_list = ["user1", "user2", "user3"]
+    slist = SlurmList(input_list)
+    assert slist.cnt == 3
+    assert slist.to_pylist() == input_list
+
+
+def test_iter():
+    input_list = ["user1", "user2", "user3"]
+    slist = SlurmList(input_list)
+    assert slist.itr_cnt == 0
+    assert slist.is_itr_null
+    assert not slist.is_null
+    assert slist.cnt == 3
+
+    for idx, slurm_item in enumerate(slist):
+        assert not slist.is_itr_null
+        assert slurm_item.has_data
+        assert slist.itr_cnt == idx+1
+
+    assert slist.itr_cnt == 0
+    assert slist.is_itr_null
+
+    slist._dealloc_list()
+    assert slist.is_null
+    assert slist.cnt == 0
+
+    for item in slist:
+        # Should not
be possible to get here + assert False + + +def test_iter_and_pop(): + input_list = ["user1", "user2", "user3"] + slist = SlurmList(input_list) + assert slist.itr_cnt == 0 + assert slist.is_itr_null + assert slist.cnt == 3 + + for idx, slurm_item in enumerate(SlurmList.iter_and_pop(slist)): + assert slist.is_itr_null + assert slurm_item.has_data + + assert slist.cnt == 0 + assert slist.itr_cnt == 0 + assert slist.is_itr_null + + # Round 2 on existing object + slist.append(["user10", "user11"]) + assert slist.itr_cnt == 0 + assert slist.cnt == 2 + + for slurm_item in SlurmList.iter_and_pop(slist): + assert slurm_item.has_data + + assert slist.cnt == 0 + assert slist.itr_cnt == 0 + assert slist.is_itr_null + + +def test_iter_and_pop_on_null_list(): + input_list = ["user1", "user2", "user3"] + slist = SlurmList(input_list) + assert not slist.is_null + assert slist.cnt == 3 + + slist._dealloc_list() + assert slist.is_null + assert slist.cnt == 0 + + for slurm_item in SlurmList.iter_and_pop(slist): + # Should not be possible to get here + assert False diff --git a/tests/unit/test_job.py b/tests/unit/test_job.py new file mode 100644 index 00000000..edcf65d4 --- /dev/null +++ b/tests/unit/test_job.py @@ -0,0 +1,74 @@ +######################################################################### +# test_job.py - job unit tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +"""test_job.py - Unit test basic job functionalities.""" + +import pytest +import pyslurm +from pyslurm import Job +from pyslurm.core.job.util import * + +def test_create_instance(): + job = Job(9999) + assert job.id == 9999 + + +def test_parse_all(): + # Use the as_dict() function to test if parsing works for all + # properties on a simple Job without error. + Job(9999).as_dict() + + +def test_parse_dependencies_to_dict(): + expected = None + assert dependency_str_to_dict("") == expected + + expected = { + "after": [1, 2], + "afterany": [], + "afterburstbuffer": [], + "aftercorr": [], + "afternotok": [], + "afterok": [3], + "singleton": False, + "satisfy": "all", + } + input_str = "after:1:2,afterok:3" + assert dependency_str_to_dict(input_str) == expected + + +def test_mail_types_int_to_list(): + expected = [] + assert mail_type_int_to_list(0) == expected + + +def test_acctg_profile_int_to_list(): + expected = [] + assert acctg_profile_int_to_list(0) == expected + + +def test_power_type_int_to_list(): + expected = [] + assert power_type_int_to_list(0) == expected + + +def test_cpu_freq_int_to_str(): + expected = None + assert cpu_freq_int_to_str(0) == expected diff --git a/tests/unit/test_job_steps.py b/tests/unit/test_job_steps.py new file mode 100644 index 00000000..c222ef34 --- /dev/null +++ b/tests/unit/test_job_steps.py @@ -0,0 +1,44 @@ +######################################################################### +# test_job_steps.py - job steps unit tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free 
Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +"""test_job_steps.py - Unit test basic job step functionality.""" + +import pytest +from pyslurm import JobStep, Job +from pyslurm.core.job.step import ( + humanize_step_id, + dehumanize_step_id, +) + +def test_create_instance(): + step = JobStep(9999, 1) + assert step.id == 1 + assert step.job_id == 9999 + + job = Job(10000) + step2 = JobStep(job, 2) + assert step2.id == 2 + assert step2.job_id == 10000 + + +def test_parse_all(): + # Use the as_dict() function to test if parsing works for all + # properties on a simple JobStep without error. + JobStep(9999, 1).as_dict() diff --git a/tests/unit/test_job_submit.py b/tests/unit/test_job_submit.py new file mode 100644 index 00000000..d0daf41b --- /dev/null +++ b/tests/unit/test_job_submit.py @@ -0,0 +1,306 @@ +######################################################################### +# test_job_submit.py - job submission unit tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+
+# PySlurm is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with PySlurm; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""test_job_submit.py - Test the job submit api functions."""
+
+import sys
+import time
+import pytest
+import pyslurm
+import tempfile
+import os
+from os import environ as pyenviron
+from util import create_simple_job_desc, create_job_script
+from pyslurm import (
+    Job,
+    Jobs,
+    JobSubmitDescription,
+    RPCError,
+)
+
+def job_desc(**kwargs):
+    return JobSubmitDescription(script=create_job_script(), **kwargs)
+
+
+def test_environment():
+    job = job_desc()
+
+    # Everything in the current environment will be exported
+    job.environment = "ALL"
+    job._create_job_submit_desc()
+
+    # Only SLURM_* Vars from the current env will be exported
+    job.environment = "NONE"
+    job._create_job_submit_desc()
+
+    # TODO: more test cases
+    # Test explicitly set vars as dict
+#    job.environment = {
+#        "PYSLURM_TEST_VAR_1": 2,
+#        "PYSLURM_TEST_VAR_2": "test-value",
+#    }
+
+
+def test_cpu_frequency():
+    job = job_desc()
+    job._create_job_submit_desc()
+
+    job.cpu_frequency = "Performance"
+    job._create_job_submit_desc()
+
+    job.cpu_frequency = {"governor": "Performance"}
+    job._create_job_submit_desc()
+
+    job.cpu_frequency = 1000000
+    job._create_job_submit_desc()
+
+    job.cpu_frequency = {"max": 1000000}
+    job._create_job_submit_desc()
+
+    job.cpu_frequency = "1000000-3700000"
+    job._create_job_submit_desc()
+
+    job.cpu_frequency = {"min": 1000000, "max": 3700000}
+    job._create_job_submit_desc()
+
+    job.cpu_frequency = "1000000-3700000:Performance"
+    job._create_job_submit_desc()
+
+    job.cpu_frequency = {"min": 1000000, "max":
3700000, + "governor": "Performance"} + job._create_job_submit_desc() + + with pytest.raises(ValueError, + match=r"Invalid cpu_frequency format*"): + job.cpu_frequency = "Performance:3700000" + job._create_job_submit_desc() + + with pytest.raises(ValueError, + match=r"min cpu-freq*"): + job.cpu_frequency = "4000000-3700000" + job._create_job_submit_desc() + + with pytest.raises(ValueError, + match=r"Invalid cpu freq value*"): + job.cpu_frequency = "3700000:Performance" + job._create_job_submit_desc() + + with pytest.raises(ValueError, + match=r"Setting Governor when specifying*"): + job.cpu_frequency = {"max": 3700000, "governor": "Performance"} + job._create_job_submit_desc() + + with pytest.raises(ValueError, + match=r"Setting Governor when specifying*"): + job.cpu_frequency = {"min": 3700000, "governor": "Performance"} + job._create_job_submit_desc() + + +def test_nodes(): + job = job_desc() + job._create_job_submit_desc() + + job.nodes = "5" + job._create_job_submit_desc() + + job.nodes = {"min": 5, "max": 5} + job._create_job_submit_desc() + + job.nodes = "5-10" + job._create_job_submit_desc() + + job.nodes = {"min": 5, "max": 10} + job._create_job_submit_desc() + + with pytest.raises(ValueError, + match=r"Max Nodecount cannot be less than*"): + job.nodes = {"min": 10, "max": 5} + job._create_job_submit_desc() + + +def test_script(): + job = job_desc() + script = create_job_script() + job._create_job_submit_desc() + + job.script = script + assert job.script == script + assert job.script_args is None + + # Try passing in a path to a script. 
+ fd, path = tempfile.mkstemp() + try: + with os.fdopen(fd, 'w') as tmp: + tmp.write(script) + + job.script = path + job.script_args = "-t 10 input.csv" + job._create_job_submit_desc() + finally: + os.remove(path) + + with pytest.raises(ValueError, + match=r"Passing arguments to a script*"): + job.script = "#!/bin/bash\nsleep 10" + job.script_args = "-t 10" + job._create_job_submit_desc() + + with pytest.raises(ValueError, + match=r"The Slurm Controller does not allow*"): + job.script = script + "\0" + job.script_args = None + job._create_job_submit_desc() + + with pytest.raises(ValueError, + match="You need to provide a batch script."): + job.script = "" + job.script_args = None + job._create_job_submit_desc() + + with pytest.raises(ValueError, + match=r"Batch script contains DOS line breaks*"): + job.script = script + "\r\n" + job.script_args = None + job._create_job_submit_desc() + + +def test_dependencies(): + job = job_desc() + job._create_job_submit_desc() + + job.dependencies = "after:70:90:60+30,afterok:80" + job._create_job_submit_desc() + + job.dependencies = "after:70:90:60?afterok:80" + job._create_job_submit_desc() + + job.dependencies = { + "afterany": [40, 30, 20], + "afternotok": [100], + "satisfy": "any", + "singleton": True, + } + job._create_job_submit_desc() + + +def test_cpus(): + job = job_desc() + job._create_job_submit_desc() + + job.cpus_per_task = 5 + job._create_job_submit_desc() + + with pytest.raises(ValueError, + match="cpus_per_task and cpus_per_gpu are mutually exclusive."): + job.cpus_per_gpu = 5 + job._create_job_submit_desc() + + job.cpus_per_task = None + job.cpus_per_gpu = 5 + job._create_job_submit_desc() + + with pytest.raises(ValueError, + match="cpus_per_task and cpus_per_gpu are mutually exclusive."): + job.cpus_per_task = 5 + job._create_job_submit_desc() + + +def test_gres_per_node(): + job = job_desc() + job._create_job_submit_desc() + + job.gres_per_node = "gpu:tesla:1,gpu:volta:5" + job._create_job_submit_desc() + + 
job.gres_per_node = {"gpu:tesla": 1, "gpu:volta": 1} + job._create_job_submit_desc() + + +def test_signal(): + job = job_desc() + job._create_job_submit_desc() + + job.signal = 7 + job._create_job_submit_desc() + + job.signal = {"batch_only": True} + job._create_job_submit_desc() + + job.signal = "7@120" + job._create_job_submit_desc() + + job.signal = "RB:8@180" + job._create_job_submit_desc() + + +def test_setting_attrs_with_env_vars(): + pyenviron["PYSLURM_JOBDESC_ACCOUNT"] = "account1" + pyenviron["PYSLURM_JOBDESC_NAME"] = "jobname" + pyenviron["PYSLURM_JOBDESC_WCKEY"] = "wckey" + pyenviron["PYSLURM_JOBDESC_CLUSTERS"] = "cluster1,cluster2" + pyenviron["PYSLURM_JOBDESC_COMMENT"] = "A simple job comment" + pyenviron["PYSLURM_JOBDESC_REQUIRES_CONTIGUOUS_NODES"] = "True" + pyenviron["PYSLURM_JOBDESC_WORKING_DIRECTORY"] = "/work/user1" + + job = job_desc(working_directory="/work/user2") + job.load_environment() + + assert job.account == "account1" + assert job.name == "jobname" + assert job.wckey == "wckey" + assert job.clusters == "cluster1,cluster2" + assert job.comment == "A simple job comment" + assert job.working_directory == "/work/user2" + assert job.requires_contiguous_nodes == True + job._create_job_submit_desc() + + +def test_parsing_sbatch_options_from_script(): + job = job_desc(working_directory="/work/user2") + + fd, path = tempfile.mkstemp() + try: + with os.fdopen(fd, 'w') as tmp: + tmp.write( + """#!/bin/bash + + #SBATCH --time 20 + #SBATCH --mem-per-cpu =1G + #SBATCH -G 1 + #SBATCH --exclusive + #SBATCH --ntasks = 2 + #SBATCH -c=3 # inline-comments should be ignored + + sleep 1000 + """ + ) + + job.script = path + job.load_sbatch_options() + assert job.time_limit == "20" + assert job.memory_per_cpu == "1G" + assert job.gpus == "1" + assert job.resource_sharing == "no" + assert job.ntasks == "2" + assert job.cpus_per_task == "3" + job._create_job_submit_desc() + finally: + os.remove(path) + diff --git a/tests/unit/test_node.py 
b/tests/unit/test_node.py new file mode 100644 index 00000000..2caf8d37 --- /dev/null +++ b/tests/unit/test_node.py @@ -0,0 +1,44 @@ +######################################################################### +# test_node.py - node unit tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+"""test_node.py - Unit Test basic functionality of the Node class.""" + +import pytest +import pyslurm +from pyslurm import Node, Nodes + + +def test_create_instance(): + node = Node("localhost") + assert node.name == "localhost" + + +def test_parse_all(): + Node("localhost").as_dict() + + +def test_create_nodes_collection(): + # TODO + assert True + + +def test_setting_attributes(): + # TODO + assert True diff --git a/tests/unit/test_task_dist.py b/tests/unit/test_task_dist.py new file mode 100644 index 00000000..52a3e07c --- /dev/null +++ b/tests/unit/test_task_dist.py @@ -0,0 +1,52 @@ +######################################################################### +# test_task_dist.py - task distribution unit tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+"""test_task_dist.py - Test task distribution functions.""" + +import pyslurm +from pyslurm.core.job.task_dist import TaskDistribution + + +def test_from_int(): + expected = None + assert TaskDistribution.from_int(0) == expected + + +def test_from_str(): + + input_str = "cyclic:cyclic:cyclic" + expected = TaskDistribution("cyclic", "cyclic", "cyclic") + parsed = TaskDistribution.from_str(input_str) + assert parsed == expected + assert parsed.to_str() == input_str + + input_str = "*:*:fcyclic,NoPack" + expected = TaskDistribution("*", "*", "fcyclic", False) + parsed = TaskDistribution.from_str(input_str) + assert parsed == expected + assert parsed.to_str() == "block:cyclic:fcyclic,NoPack" + + input_plane_size = 10 + expected = TaskDistribution(plane_size=input_plane_size) + parsed = TaskDistribution.from_str(f"plane={input_plane_size}") + assert parsed == expected + assert parsed.to_str() == "plane" + assert parsed.plane == 10 +# assert parsed.as_int() == pyslurm.SLURM_DIST_PLANE diff --git a/tests/unit/util.py b/tests/unit/util.py new file mode 100644 index 00000000..d142a3a4 --- /dev/null +++ b/tests/unit/util.py @@ -0,0 +1,56 @@ +######################################################################### +# util.py - utility functions for tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import pytest +from pyslurm import ( + Job, + JobSubmitDescription, +) + +# TODO: Figure out how to share this properly between the unit and integration +# folders + +def create_job_script(): + job_script = """\ +#!/bin/bash + +echo "Got args: $@" + +/usr/bin/env + +sleep 500\ + +""" + return job_script + + +def create_simple_job_desc(script=None, **kwargs): + job = JobSubmitDescription(**kwargs) + + job.name = "test_job" + job.standard_output = "/tmp/slurm-test-%j.out" + job.memory_per_cpu = "1G" + job.ntasks = 2 + job.cpus_per_task = 3 + job.script = create_job_script() if not script else script + job.time_limit = "1-00:00:00" + + return job diff --git a/valgrind-pyslurm.supp b/valgrind-pyslurm.supp new file mode 100644 index 00000000..d7243f44 --- /dev/null +++ b/valgrind-pyslurm.supp @@ -0,0 +1,544 @@ +# Initial suppression file taken from here: +# https://github.com/python/cpython/blob/77a3196b7cc17d90a8aae5629aa71ff183b9266a/Misc/valgrind-python.supp +# Extended with Slurm specific suppressions + +{ + Python _PyFunction_Vectorcall + Memcheck:Leak + match-leak-kinds: possible + fun:malloc + fun:_PyObject_GC_NewVar + obj:/usr/bin/python3.10 + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall +} + +### +### IGNORE POSSIBLE LEAKS CAUSED BY SOME INIT FUNCTIONS IN libslurm +### + +{ + Slurm select_g_init + Memcheck:Leak + match-leak-kinds: possible + ... + fun:select_g_init + ... +} + +{ + Slurm slurm_auth_init + Memcheck:Leak + match-leak-kinds: possible + ... + fun:slurm_auth_init + ... 
+} + +{ + Slurm slurm_conf_init/slurm_init + Memcheck:Leak + match-leak-kinds: possible + ... + fun:slurm_conf_init + fun:slurm_init + ... +} + +{ + Slurm hash_g_init + Memcheck:Leak + match-leak-kinds: possible + ... + fun:hash_g_init + ... +} + +{ + ADDRESS_IN_RANGE/Invalid read of size 4 + Memcheck:Addr4 + fun:address_in_range +} + +{ + ADDRESS_IN_RANGE/Invalid read of size 4 + Memcheck:Value4 + fun:address_in_range +} + +{ + ADDRESS_IN_RANGE/Invalid read of size 8 (x86_64 aka amd64) + Memcheck:Value8 + fun:address_in_range +} + +{ + ADDRESS_IN_RANGE/Conditional jump or move depends on uninitialised value + Memcheck:Cond + fun:address_in_range +} + +# +# Leaks (including possible leaks) +# Hmmm, I wonder if this masks some real leaks. I think it does. +# Will need to fix that. +# + +{ + Suppress leaking the GIL. Happens once per process, see comment in ceval.c. + Memcheck:Leak + fun:malloc + fun:PyThread_allocate_lock + fun:PyEval_InitThreads +} + +{ + Suppress leaking the GIL after a fork. + Memcheck:Leak + fun:malloc + fun:PyThread_allocate_lock + fun:PyEval_ReInitThreads +} + +{ + Suppress leaking the autoTLSkey. This looks like it shouldn't leak though. + Memcheck:Leak + fun:malloc + fun:PyThread_create_key + fun:_PyGILState_Init + fun:Py_InitializeEx + fun:Py_Main +} + +{ + Hmmm, is this a real leak or like the GIL? 
+ Memcheck:Leak + fun:malloc + fun:PyThread_ReInitTLS +} + +{ + Handle PyMalloc confusing valgrind (possibly leaked) + Memcheck:Leak + fun:realloc + fun:_PyObject_GC_Resize + fun:COMMENT_THIS_LINE_TO_DISABLE_LEAK_WARNING +} + +{ + Handle PyMalloc confusing valgrind (possibly leaked) + Memcheck:Leak + fun:malloc + fun:_PyObject_GC_New + fun:COMMENT_THIS_LINE_TO_DISABLE_LEAK_WARNING +} + +{ + Handle PyMalloc confusing valgrind (possibly leaked) + Memcheck:Leak + fun:malloc + fun:_PyObject_GC_NewVar + fun:COMMENT_THIS_LINE_TO_DISABLE_LEAK_WARNING +} + +# +# Non-python specific leaks +# + +{ + Handle pthread issue (possibly leaked) + Memcheck:Leak + fun:calloc + fun:allocate_dtv + fun:_dl_allocate_tls_storage + fun:_dl_allocate_tls +} + +{ + Handle pthread issue (possibly leaked) + Memcheck:Leak + fun:memalign + fun:_dl_allocate_tls_storage + fun:_dl_allocate_tls +} + +{ + ADDRESS_IN_RANGE/Invalid read of size 4 + Memcheck:Addr4 + fun:_PyObject_Free +} + +{ + ADDRESS_IN_RANGE/Invalid read of size 4 + Memcheck:Value4 + fun:_PyObject_Free +} + +{ + ADDRESS_IN_RANGE/Use of uninitialised value of size 8 + Memcheck:Addr8 + fun:_PyObject_Free +} + +{ + ADDRESS_IN_RANGE/Use of uninitialised value of size 8 + Memcheck:Value8 + fun:_PyObject_Free +} + +{ + ADDRESS_IN_RANGE/Conditional jump or move depends on uninitialised value + Memcheck:Cond + fun:_PyObject_Free +} + +{ + ADDRESS_IN_RANGE/Invalid read of size 4 + Memcheck:Addr4 + fun:_PyObject_Realloc +} + +{ + ADDRESS_IN_RANGE/Invalid read of size 4 + Memcheck:Value4 + fun:_PyObject_Realloc +} + +{ + ADDRESS_IN_RANGE/Use of uninitialised value of size 8 + Memcheck:Addr8 + fun:_PyObject_Realloc +} + +{ + ADDRESS_IN_RANGE/Use of uninitialised value of size 8 + Memcheck:Value8 + fun:_PyObject_Realloc +} + +{ + ADDRESS_IN_RANGE/Conditional jump or move depends on uninitialised value + Memcheck:Cond + fun:_PyObject_Realloc +} + +### +### All the suppressions below are for errors that occur within libraries +### that Python uses. 
The problems to not appear to be related to Python's +### use of the libraries. +### + +{ + Generic ubuntu ld problems + Memcheck:Addr8 + obj:/lib/ld-2.4.so + obj:/lib/ld-2.4.so + obj:/lib/ld-2.4.so + obj:/lib/ld-2.4.so +} + +{ + Generic gentoo ld problems + Memcheck:Cond + obj:/lib/ld-2.3.4.so + obj:/lib/ld-2.3.4.so + obj:/lib/ld-2.3.4.so + obj:/lib/ld-2.3.4.so +} + +{ + DBM problems, see test_dbm + Memcheck:Param + write(buf) + fun:write + obj:/usr/lib/libdb1.so.2 + obj:/usr/lib/libdb1.so.2 + obj:/usr/lib/libdb1.so.2 + obj:/usr/lib/libdb1.so.2 + fun:dbm_close +} + +{ + DBM problems, see test_dbm + Memcheck:Value8 + fun:memmove + obj:/usr/lib/libdb1.so.2 + obj:/usr/lib/libdb1.so.2 + obj:/usr/lib/libdb1.so.2 + obj:/usr/lib/libdb1.so.2 + fun:dbm_store + fun:dbm_ass_sub +} + +{ + DBM problems, see test_dbm + Memcheck:Cond + obj:/usr/lib/libdb1.so.2 + obj:/usr/lib/libdb1.so.2 + obj:/usr/lib/libdb1.so.2 + fun:dbm_store + fun:dbm_ass_sub +} + +{ + DBM problems, see test_dbm + Memcheck:Cond + fun:memmove + obj:/usr/lib/libdb1.so.2 + obj:/usr/lib/libdb1.so.2 + obj:/usr/lib/libdb1.so.2 + obj:/usr/lib/libdb1.so.2 + fun:dbm_store + fun:dbm_ass_sub +} + +{ + GDBM problems, see test_gdbm + Memcheck:Param + write(buf) + fun:write + fun:gdbm_open + +} + +{ + Uninitialised byte(s) false alarm, see bpo-35561 + Memcheck:Param + epoll_ctl(event) + fun:epoll_ctl + fun:pyepoll_internal_ctl +} + +{ + ZLIB problems, see test_gzip + Memcheck:Cond + obj:/lib/libz.so.1.2.3 + obj:/lib/libz.so.1.2.3 + fun:deflate +} + +{ + Avoid problems w/readline doing a putenv and leaking on exit + Memcheck:Leak + fun:malloc + fun:xmalloc + fun:sh_set_lines_and_columns + fun:_rl_get_screen_size + fun:_rl_init_terminal_io + obj:/lib/libreadline.so.4.3 + fun:rl_initialize +} + +# Valgrind emits "Conditional jump or move depends on uninitialised value(s)" +# false alarms on GCC builtin strcmp() function. The GCC code is correct. 
+# +# Valgrind bug: https://bugs.kde.org/show_bug.cgi?id=264936 +{ + bpo-38118: Valgrind emits false alarm on GCC builtin strcmp() + Memcheck:Cond + fun:PyUnicode_Decode +} + + +### +### These occur from somewhere within the SSL, when running +### test_socket_sll. They are too general to leave on by default. +### +###{ +### somewhere in SSL stuff +### Memcheck:Cond +### fun:memset +###} +###{ +### somewhere in SSL stuff +### Memcheck:Value4 +### fun:memset +###} +### +###{ +### somewhere in SSL stuff +### Memcheck:Cond +### fun:MD5_Update +###} +### +###{ +### somewhere in SSL stuff +### Memcheck:Value4 +### fun:MD5_Update +###} + +# Fedora's package "openssl-1.0.1-0.1.beta2.fc17.x86_64" on x86_64 +# See http://bugs.python.org/issue14171 +{ + openssl 1.0.1 prng 1 + Memcheck:Cond + fun:bcmp + fun:fips_get_entropy + fun:FIPS_drbg_instantiate + fun:RAND_init_fips + fun:OPENSSL_init_library + fun:SSL_library_init + fun:init_hashlib +} + +{ + openssl 1.0.1 prng 2 + Memcheck:Cond + fun:fips_get_entropy + fun:FIPS_drbg_instantiate + fun:RAND_init_fips + fun:OPENSSL_init_library + fun:SSL_library_init + fun:init_hashlib +} + +{ + openssl 1.0.1 prng 3 + Memcheck:Value8 + fun:_x86_64_AES_encrypt_compact + fun:AES_encrypt +} + +# +# All of these problems come from using test_socket_ssl +# +{ + from test_socket_ssl + Memcheck:Cond + fun:BN_bin2bn +} + +{ + from test_socket_ssl + Memcheck:Cond + fun:BN_num_bits_word +} + +{ + from test_socket_ssl + Memcheck:Value4 + fun:BN_num_bits_word +} + +{ + from test_socket_ssl + Memcheck:Cond + fun:BN_mod_exp_mont_word +} + +{ + from test_socket_ssl + Memcheck:Cond + fun:BN_mod_exp_mont +} + +{ + from test_socket_ssl + Memcheck:Param + write(buf) + fun:write + obj:/usr/lib/libcrypto.so.0.9.7 +} + +{ + from test_socket_ssl + Memcheck:Cond + fun:RSA_verify +} + +{ + from test_socket_ssl + Memcheck:Value4 + fun:RSA_verify +} + +{ + from test_socket_ssl + Memcheck:Value4 + fun:DES_set_key_unchecked +} + +{ + from test_socket_ssl + 
Memcheck:Value4 + fun:DES_encrypt2 +} + +{ + from test_socket_ssl + Memcheck:Cond + obj:/usr/lib/libssl.so.0.9.7 +} + +{ + from test_socket_ssl + Memcheck:Value4 + obj:/usr/lib/libssl.so.0.9.7 +} + +{ + from test_socket_ssl + Memcheck:Cond + fun:BUF_MEM_grow_clean +} + +{ + from test_socket_ssl + Memcheck:Cond + fun:memcpy + fun:ssl3_read_bytes +} + +{ + from test_socket_ssl + Memcheck:Cond + fun:SHA1_Update +} + +{ + from test_socket_ssl + Memcheck:Value4 + fun:SHA1_Update +} + +{ + test_buffer_non_debug + Memcheck:Addr4 + fun:PyUnicodeUCS2_FSConverter +} + +{ + test_buffer_non_debug + Memcheck:Addr4 + fun:PyUnicode_FSConverter +} + +{ + wcscmp_false_positive + Memcheck:Addr8 + fun:wcscmp + fun:_PyOS_GetOpt + fun:Py_Main + fun:main +} + +# Additional suppressions for the unified decimal tests: +{ + test_decimal + Memcheck:Addr4 + fun:PyUnicodeUCS2_FSConverter +} + +{ + test_decimal2 + Memcheck:Addr4 + fun:PyUnicode_FSConverter +} + From b81d491aec980aacc2be775dac229e0a2894f2db Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Fri, 5 May 2023 17:53:13 +0200 Subject: [PATCH 17/48] Update documentation and do some restructuring (#285) * update README - include logo - remove testing section (maybe it back separetely in the documentation) - remove workflow-badge (will only fail anyway, add back later when the building process has been updated) * update the documentation * move pyslurm.core.common package to pyslurm.utils * move pyslurm.core.db package to pyslurm.db * rename JobStats to JobStatistics --- README.md | 128 +++--------------- doc_requirements.txt | 1 + docs/index.md | 53 +------- docs/logo.png | 1 + docs/reference/.pages | 3 + docs/reference/config.md | 10 ++ docs/reference/db/.pages | 3 + docs/reference/db/cluster.md | 10 ++ docs/reference/db/connection.md | 6 + docs/reference/db/event.md | 10 ++ docs/reference/db/index.md | 4 + docs/reference/db/job.md | 13 ++ docs/reference/db/jobsearchfilter.md | 6 + 
docs/reference/db/jobstats.md | 6 + docs/reference/db/jobstep.md | 9 ++ docs/reference/db/reservation.md | 10 ++ docs/reference/exceptions.md | 9 ++ docs/reference/frontend.md | 10 ++ docs/reference/hostlist.md | 10 ++ docs/reference/index.md | 48 +++++++ docs/reference/job.md | 13 ++ docs/reference/jobstep.md | 13 ++ docs/reference/jobsubmitdescription.md | 6 + docs/reference/node.md | 13 ++ docs/reference/old/.pages | 3 + docs/reference/old/db/.pages | 3 + docs/reference/old/db/job.md | 10 ++ docs/reference/old/job.md | 10 ++ docs/reference/old/jobstep.md | 10 ++ docs/reference/old/node.md | 10 ++ docs/reference/partition.md | 10 ++ docs/reference/reservation.md | 10 ++ docs/reference/statistics.md | 10 ++ docs/reference/topology.md | 10 ++ docs/reference/trigger.md | 10 ++ docs/reference/utilities.md | 39 ++++++ docs/stylesheets/extra.css | 2 +- docs/pyslurm-docs.png => logo.png | Bin mkdocs.yml | 62 +++++++-- pyslurm/__init__.py | 41 +----- pyslurm/api.pxd | 2 +- pyslurm/core/error.pyx | 15 +- pyslurm/core/job/job.pxd | 14 +- pyslurm/core/job/job.pyx | 93 ++++++------- pyslurm/core/job/sbatch_opts.pyx | 4 +- pyslurm/core/job/step.pxd | 17 ++- pyslurm/core/job/step.pyx | 66 ++++----- pyslurm/core/job/submission.pxd | 10 +- pyslurm/core/job/submission.pyx | 32 ++--- pyslurm/core/job/task_dist.pxd | 2 +- pyslurm/core/job/util.pyx | 4 +- pyslurm/core/node.pxd | 46 +++++-- pyslurm/core/node.pyx | 72 ++++------ pyslurm/core/slurmctld.pxd | 4 +- pyslurm/{core => }/db/__init__.pxd | 0 pyslurm/{core => }/db/__init__.py | 13 +- pyslurm/{core => }/db/connection.pxd | 0 pyslurm/{core => }/db/connection.pyx | 15 +- pyslurm/{core => }/db/job.pxd | 23 ++-- pyslurm/{core => }/db/job.pyx | 52 +++++-- pyslurm/{core => }/db/qos.pxd | 6 +- pyslurm/{core => }/db/qos.pyx | 6 +- pyslurm/{core => }/db/stats.pxd | 12 +- pyslurm/{core => }/db/stats.pyx | 12 +- pyslurm/{core => }/db/step.pxd | 16 ++- pyslurm/{core => }/db/step.pyx | 10 +- pyslurm/{core => }/db/tres.pxd | 2 +- 
pyslurm/{core => }/db/tres.pyx | 2 +- pyslurm/{core => }/db/util.pxd | 2 +- pyslurm/{core => }/db/util.pyx | 0 pyslurm/utils/__init__.pxd | 2 + pyslurm/utils/__init__.py | 44 ++++++ pyslurm/{core/common => utils}/cstr.pxd | 0 pyslurm/{core/common => utils}/cstr.pyx | 0 pyslurm/{core/common => utils}/ctime.pxd | 2 +- pyslurm/{core/common => utils}/ctime.pyx | 12 +- .../common/__init__.pxd => utils/helpers.pxd} | 4 +- .../common/__init__.pyx => utils/helpers.pyx} | 12 +- pyslurm/{core/common => utils}/uint.pxd | 0 pyslurm/{core/common => utils}/uint.pyx | 0 tests/integration/test_job_steps.py | 2 +- tests/integration/test_node.py | 2 +- tests/unit/test_common.py | 8 +- tests/unit/test_db_slurm_list.py | 2 +- 84 files changed, 803 insertions(+), 474 deletions(-) create mode 120000 docs/logo.png create mode 100644 docs/reference/.pages create mode 100644 docs/reference/config.md create mode 100644 docs/reference/db/.pages create mode 100644 docs/reference/db/cluster.md create mode 100644 docs/reference/db/connection.md create mode 100644 docs/reference/db/event.md create mode 100644 docs/reference/db/index.md create mode 100644 docs/reference/db/job.md create mode 100644 docs/reference/db/jobsearchfilter.md create mode 100644 docs/reference/db/jobstats.md create mode 100644 docs/reference/db/jobstep.md create mode 100644 docs/reference/db/reservation.md create mode 100644 docs/reference/exceptions.md create mode 100644 docs/reference/frontend.md create mode 100644 docs/reference/hostlist.md create mode 100644 docs/reference/index.md create mode 100644 docs/reference/job.md create mode 100644 docs/reference/jobstep.md create mode 100644 docs/reference/jobsubmitdescription.md create mode 100644 docs/reference/node.md create mode 100644 docs/reference/old/.pages create mode 100644 docs/reference/old/db/.pages create mode 100644 docs/reference/old/db/job.md create mode 100644 docs/reference/old/job.md create mode 100644 docs/reference/old/jobstep.md create mode 100644 
docs/reference/old/node.md create mode 100644 docs/reference/partition.md create mode 100644 docs/reference/reservation.md create mode 100644 docs/reference/statistics.md create mode 100644 docs/reference/topology.md create mode 100644 docs/reference/trigger.md create mode 100644 docs/reference/utilities.md rename docs/pyslurm-docs.png => logo.png (100%) rename pyslurm/{core => }/db/__init__.pxd (100%) rename pyslurm/{core => }/db/__init__.py (81%) rename pyslurm/{core => }/db/connection.pxd (100%) rename pyslurm/{core => }/db/connection.pyx (86%) rename pyslurm/{core => }/db/job.pxd (94%) rename pyslurm/{core => }/db/job.pyx (92%) rename pyslurm/{core => }/db/qos.pxd (93%) rename pyslurm/{core => }/db/qos.pyx (97%) rename pyslurm/{core => }/db/stats.pxd (95%) rename pyslurm/{core => }/db/stats.pyx (97%) rename pyslurm/{core => }/db/step.pxd (88%) rename pyslurm/{core => }/db/step.pyx (95%) rename pyslurm/{core => }/db/tres.pxd (97%) rename pyslurm/{core => }/db/tres.pyx (98%) rename pyslurm/{core => }/db/util.pxd (97%) rename pyslurm/{core => }/db/util.pyx (100%) create mode 100644 pyslurm/utils/__init__.pxd create mode 100644 pyslurm/utils/__init__.py rename pyslurm/{core/common => utils}/cstr.pxd (100%) rename pyslurm/{core/common => utils}/cstr.pyx (100%) rename pyslurm/{core/common => utils}/ctime.pxd (97%) rename pyslurm/{core/common => utils}/ctime.pyx (95%) rename pyslurm/{core/common/__init__.pxd => utils/helpers.pxd} (93%) rename pyslurm/{core/common/__init__.pyx => utils/helpers.pyx} (96%) rename pyslurm/{core/common => utils}/uint.pxd (100%) rename pyslurm/{core/common => utils}/uint.pyx (100%) diff --git a/README.md b/README.md index 9e92dc43..1db47f45 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,23 @@ -# PySlurm +# PySlurm Logo -[![PySlurm](https://github.com/PySlurm/pyslurm/actions/workflows/pyslurm.yml/badge.svg?branch=main)](https://github.com/PySlurm/pyslurm/actions/workflows/pyslurm.yml) +pyslurm is the Python client library for the [Slurm 
Workload Manager](https://slurm.schedmd.com) -## Overview - -PySlurm is the Python client library for the [Slurm](https://slurm.schedmd.com) HPC Scheduler. - -## Prerequisites +## Requirements * [Slurm](https://slurm.schedmd.com) - Slurm shared library and header files * [Python](https://www.python.org) - >= 3.6 * [Cython](https://cython.org) - >= 0.29.30 but < 3.0 -This PySlurm branch is for the Slurm Major-Release 23.02 +This Version is for Slurm 23.02.x + +## Versioning + +In pyslurm, the versioning scheme follows the official Slurm versioning. The +first two numbers (`MAJOR.MINOR`) always correspond to Slurms Major-Release, +for example `23.02`. +The last number (`MICRO`) is however not tied in any way to Slurms `MICRO` +version, but is instead PySlurm's internal Patch-Level. For example, any +pyslurm 23.02.X version should work with any Slurm 23.02.X release. ## Installation @@ -21,14 +26,14 @@ By default, it is searched inside `/usr/include` for the Header files and in For Slurm installations in different locations, you will need to provide the corresponding paths to the necessary files. -You can specify these Paths with environment variables (recommended), for example: +You can specify those with environment variables (recommended), for example: ```shell export SLURM_INCLUDE_DIR=/opt/slurm/23.02/include export SLURM_LIB_DIR=/opt/slurm/23.02/lib ``` -Then you can proceed to install PySlurm, for example by cloning the Repository: +Then you can proceed to install pyslurm, for example by cloning the Repository: ```shell git clone https://github.com/PySlurm/pyslurm.git && cd pyslurm @@ -40,105 +45,14 @@ pip install . Also see `python setup.py --help` -## Release Versioning - -PySlurm's versioning scheme follows the official Slurm versioning. The first -two numbers (MAJOR.MINOR) always correspond to Slurms Major-Release, for example -`23.02`. The last number (MICRO) is however not tied in any way to Slurms -MICRO version. 
For example, any PySlurm 23.02.X version should work with any -Slurm 23.02.X release. - -## Documentation - -The API documentation is hosted at . - -To build the docs locally, use [Sphinx](http://www.sphinx-doc.org) to generate -the documentation from the reStructuredText based docstrings found in the -pyslurm module once it is built: - -```shell -cd doc -make clean -make html -``` - -## Testing - -PySlurm requires an installation of Slurm. - -### Using a Test Container - -To run tests locally without an existing Slurm cluster, `docker` and -`docker-compose` is required. - -Clone the project: - -```shell -git clone https://github.com/PySlurm/pyslurm.git -cd pyslurm -``` - -Start the Slurm container in the background: - -```shell -docker-compose up -d -``` - -The cluster takes a few seconds to start all the required Slurm services. Tail -the logs: - -```shell -docker-compose logs -f -``` - -When the cluster is ready, you will see the following log message: - -```text -Cluster is now available -``` - -Press CTRL+C to stop tailing the logs. Slurm is now running in a container in -detached mode. `docker-compose` also bind mounds the git directory inside the -container at `/pyslurm` so that the container has access to the test cases. - -Install test dependencies: - -```shell -pipenv sync --dev -``` - -Execute the tests inside the container: - -```shell -pipenv run pytest -sv scripts/run_tests_in_container.py -``` - -When testing is complete, stop the running Slurm container: - -```shell -docker-compose down -``` - -### Testing on an Existing Slurm Cluster - -You may also choose to clone the project and run tests on a node where Slurm is -already compiled and installed: - -```shell -git clone https://github.com/PySlurm/pyslurm.git -cd pyslurm -pip install . 
-./scripts/configure.sh -pipenv sync --dev -pipenv run pytest -sv -``` - ## Contributors -PySlurm is made by [contributors like +pyslurm is made by [contributors like you](https://github.com/PySlurm/pyslurm/graphs/contributors). -## Help +## Support + +Feel free to ask questions in the [GitHub +Discussions](https://github.com/orgs/PySlurm/discussions) -Ask questions on the [PySlurm Google -Group](https://groups.google.com/forum/#!forum/pyslurm) +Found a bug or you are missing a feature? Feel free to [open an Issue!](https://github.com/PySlurm/pyslurm/issues/new) diff --git a/doc_requirements.txt b/doc_requirements.txt index 7497dfa3..d7e92631 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -4,3 +4,4 @@ setuptools mkdocstrings[python] mike mkdocs-material +mkdocs-awesome-pages-plugin diff --git a/docs/index.md b/docs/index.md index a3097617..612c7a5e 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,52 +1 @@ ---- -hide: - - navigation ---- -# PySlurm: Slurm Interface to python - -This module provides a low-level Python wrapper around the Slurm C-API using Cython. 
- -::: pyslurm.config - handler: python - -::: pyslurm.front_end - handler: python - -::: pyslurm.hostlist - handler: python - -::: pyslurm.job - handler: python - -::: pyslurm.jobstep - handler: python - -::: pyslurm.node - handler: python - -::: pyslurm.partition - handler: python - -::: pyslurm.reservation - handler: python - -::: pyslurm.slurmdb_events - handler: python - -::: pyslurm.slurmdb_reservations - handler: python - -::: pyslurm.slurmdb_clusters - handler: python - -::: pyslurm.slurmdb_jobs - handler: python - -::: pyslurm.statistics - handler: python - -::: pyslurm.topology - handler: python - -::: pyslurm.trigger - handler: python \ No newline at end of file +--8<-- "README.md" diff --git a/docs/logo.png b/docs/logo.png new file mode 120000 index 00000000..a9c1a7c8 --- /dev/null +++ b/docs/logo.png @@ -0,0 +1 @@ +../logo.png \ No newline at end of file diff --git a/docs/reference/.pages b/docs/reference/.pages new file mode 100644 index 00000000..4c3f8599 --- /dev/null +++ b/docs/reference/.pages @@ -0,0 +1,3 @@ +title: API Reference +nav: + - ... diff --git a/docs/reference/config.md b/docs/reference/config.md new file mode 100644 index 00000000..94b0438e --- /dev/null +++ b/docs/reference/config.md @@ -0,0 +1,10 @@ +--- +title: Config +--- + +!!! warning + This API is currently being completely reworked, and is subject to be + removed in the future when a replacement is introduced + +::: pyslurm.config + handler: python diff --git a/docs/reference/db/.pages b/docs/reference/db/.pages new file mode 100644 index 00000000..b7263357 --- /dev/null +++ b/docs/reference/db/.pages @@ -0,0 +1,3 @@ +title: Database +nav: + - ... diff --git a/docs/reference/db/cluster.md b/docs/reference/db/cluster.md new file mode 100644 index 00000000..e6d0a900 --- /dev/null +++ b/docs/reference/db/cluster.md @@ -0,0 +1,10 @@ +--- +title: Cluster +--- + +!!! 
warning + This API is currently being completely reworked, and is subject to be + removed in the future when a replacement is introduced + +::: pyslurm.slurmdb_clusters + handler: python diff --git a/docs/reference/db/connection.md b/docs/reference/db/connection.md new file mode 100644 index 00000000..27c904fc --- /dev/null +++ b/docs/reference/db/connection.md @@ -0,0 +1,6 @@ +--- +title: Connection +--- + +::: pyslurm.db.Connection + handler: python diff --git a/docs/reference/db/event.md b/docs/reference/db/event.md new file mode 100644 index 00000000..020abcac --- /dev/null +++ b/docs/reference/db/event.md @@ -0,0 +1,10 @@ +--- +title: Event +--- + +!!! warning + This API is currently being completely reworked, and is subject to be + removed in the future when a replacement is introduced + +::: pyslurm.slurmdb_events + handler: python diff --git a/docs/reference/db/index.md b/docs/reference/db/index.md new file mode 100644 index 00000000..98f3b38e --- /dev/null +++ b/docs/reference/db/index.md @@ -0,0 +1,4 @@ +# pyslurm.db + +The `pyslurm.db` package contains all functionality to interact with the Slurm +Database Daemon (slurmdbd) diff --git a/docs/reference/db/job.md b/docs/reference/db/job.md new file mode 100644 index 00000000..a2c7fadd --- /dev/null +++ b/docs/reference/db/job.md @@ -0,0 +1,13 @@ +--- +title: Job +--- + +!!! 
note + This supersedes the [pyslurm.slurmdb_job](../old/db/job.md) class, which + will be removed in a future release + +::: pyslurm.db.Job + handler: python + +::: pyslurm.db.Jobs + handler: python diff --git a/docs/reference/db/jobsearchfilter.md b/docs/reference/db/jobsearchfilter.md new file mode 100644 index 00000000..fa3864c5 --- /dev/null +++ b/docs/reference/db/jobsearchfilter.md @@ -0,0 +1,6 @@ +--- +title: JobSearchFilter +--- + +::: pyslurm.db.JobSearchFilter + handler: python diff --git a/docs/reference/db/jobstats.md b/docs/reference/db/jobstats.md new file mode 100644 index 00000000..35f31ac6 --- /dev/null +++ b/docs/reference/db/jobstats.md @@ -0,0 +1,6 @@ +--- +title: JobStatistics +--- + +::: pyslurm.db.JobStatistics + handler: python diff --git a/docs/reference/db/jobstep.md b/docs/reference/db/jobstep.md new file mode 100644 index 00000000..392fab65 --- /dev/null +++ b/docs/reference/db/jobstep.md @@ -0,0 +1,9 @@ +--- +title: JobStep +--- + +::: pyslurm.db.JobStep + handler: python + +::: pyslurm.db.JobSteps + handler: python diff --git a/docs/reference/db/reservation.md b/docs/reference/db/reservation.md new file mode 100644 index 00000000..1a1af0c4 --- /dev/null +++ b/docs/reference/db/reservation.md @@ -0,0 +1,10 @@ +--- +title: Reservation +--- + +!!! warning + This API is currently being completely reworked, and is subject to be + removed in the future when a replacement is introduced + +::: pyslurm.slurmdb_reservations + handler: python diff --git a/docs/reference/exceptions.md b/docs/reference/exceptions.md new file mode 100644 index 00000000..90876435 --- /dev/null +++ b/docs/reference/exceptions.md @@ -0,0 +1,9 @@ +--- +title: Exceptions +--- + +::: pyslurm.PyslurmError + handler: python + +::: pyslurm.RPCError + handler: python diff --git a/docs/reference/frontend.md b/docs/reference/frontend.md new file mode 100644 index 00000000..5247e540 --- /dev/null +++ b/docs/reference/frontend.md @@ -0,0 +1,10 @@ +--- +title: Frontend +--- + +!!! 
warning + This API is currently being completely reworked, and is subject to be + removed in the future when a replacement is introduced + +::: pyslurm.front_end + handler: python diff --git a/docs/reference/hostlist.md b/docs/reference/hostlist.md new file mode 100644 index 00000000..dc2d81ee --- /dev/null +++ b/docs/reference/hostlist.md @@ -0,0 +1,10 @@ +--- +title: Hostlist +--- + +!!! warning + This API is currently being completely reworked, and is subject to be + removed in the future when a replacement is introduced + +::: pyslurm.hostlist + handler: python diff --git a/docs/reference/index.md b/docs/reference/index.md new file mode 100644 index 00000000..e49352fd --- /dev/null +++ b/docs/reference/index.md @@ -0,0 +1,48 @@ +# pyslurm + +The `pyslurm` package is a wrapper around the Slurm C-API + + +!!! warning + Please note that the `pyslurm` API is currently being completely reworked. + Reworked classes and functions that replace functionality of the old API + will be marked as such, with a link to the documentation of its old + counterpart. + + Old API functionality that is already replaced is marked as deprecated, + and will be removed at some point in the future. + + The new reworked classes will be tested thoroughly before making them + available here, although it is of course still possible that some bugs may + appear here and there, which we will try to identify as best as possible! + + In addition, since these classes are pretty new, their interface + (precisely: attribute names, return types) should not yet be considered + 100% stable, and changes may be made in rare cases if it makes sense to do + so. + + If you are using the new-style API, we would like to know your feedback on + it! 
+ + +## Functionality already reworked: + +* Job API + * [pyslurm.Job][] + * [pyslurm.JobStep][] + * [pyslurm.JobSteps][] + * [pyslurm.Jobs][] + * [pyslurm.JobSubmitDescription][] +* Database Job API + * [pyslurm.db.Job][] + * [pyslurm.db.JobStep][] + * [pyslurm.db.Jobs][] + * [pyslurm.db.JobSearchFilter][] +* Node API + * [pyslurm.Node][] + * [pyslurm.Nodes][] +* New Exceptions + * [pyslurm.RPCError][] + * [pyslurm.PyslurmError][] +* New utility functions + * [pyslurm.utils][] diff --git a/docs/reference/job.md b/docs/reference/job.md new file mode 100644 index 00000000..8e3d0c6e --- /dev/null +++ b/docs/reference/job.md @@ -0,0 +1,13 @@ +--- +title: Job +--- + +!!! note + This supersedes the [pyslurm.job](old/job.md) class, which will be + removed in a future release + +::: pyslurm.Job + handler: python + +::: pyslurm.Jobs + handler: python diff --git a/docs/reference/jobstep.md b/docs/reference/jobstep.md new file mode 100644 index 00000000..2ce6ef7f --- /dev/null +++ b/docs/reference/jobstep.md @@ -0,0 +1,13 @@ +--- +title: JobStep +--- + +!!! note + This supersedes the [pyslurm.jobstep](old/jobstep.md) class, which + will be removed in a future release + +::: pyslurm.JobStep + handler: python + +::: pyslurm.JobSteps + handler: python diff --git a/docs/reference/jobsubmitdescription.md b/docs/reference/jobsubmitdescription.md new file mode 100644 index 00000000..bd31bac9 --- /dev/null +++ b/docs/reference/jobsubmitdescription.md @@ -0,0 +1,6 @@ +--- +title: JobSubmitDescription +--- + +::: pyslurm.JobSubmitDescription + handler: python diff --git a/docs/reference/node.md b/docs/reference/node.md new file mode 100644 index 00000000..ccb16c54 --- /dev/null +++ b/docs/reference/node.md @@ -0,0 +1,13 @@ +--- +title: Node +--- + +!!! 
note + This supersedes the [pyslurm.node](old/node.md) class, which will be + removed in a future release + +::: pyslurm.Node + handler: python + +::: pyslurm.Nodes + handler: python diff --git a/docs/reference/old/.pages b/docs/reference/old/.pages new file mode 100644 index 00000000..ae2a9b18 --- /dev/null +++ b/docs/reference/old/.pages @@ -0,0 +1,3 @@ +hide: true +nav: + - ... diff --git a/docs/reference/old/db/.pages b/docs/reference/old/db/.pages new file mode 100644 index 00000000..ae2a9b18 --- /dev/null +++ b/docs/reference/old/db/.pages @@ -0,0 +1,3 @@ +hide: true +nav: + - ... diff --git a/docs/reference/old/db/job.md b/docs/reference/old/db/job.md new file mode 100644 index 00000000..4046026c --- /dev/null +++ b/docs/reference/old/db/job.md @@ -0,0 +1,10 @@ +--- +title: Job +--- + +!!! warning + This is superseded by [pyslurm.db.Job](../../db/job.md) class and will + be removed in a future release + +::: pyslurm.slurmdb_jobs + handler: python diff --git a/docs/reference/old/job.md b/docs/reference/old/job.md new file mode 100644 index 00000000..fb8f694a --- /dev/null +++ b/docs/reference/old/job.md @@ -0,0 +1,10 @@ +--- +title: Job +--- + +!!! warning + This class is superseded by [pyslurm.Job](../job.md) and will be removed + in a future release. + +::: pyslurm.job + handler: python diff --git a/docs/reference/old/jobstep.md b/docs/reference/old/jobstep.md new file mode 100644 index 00000000..2147e53b --- /dev/null +++ b/docs/reference/old/jobstep.md @@ -0,0 +1,10 @@ +--- +title: JobStep +--- + +!!! warning + This class is superseded by [pyslurm.JobStep](../jobstep.md) and will be + removed in a future release. + +::: pyslurm.jobstep + handler: python diff --git a/docs/reference/old/node.md b/docs/reference/old/node.md new file mode 100644 index 00000000..ec80324a --- /dev/null +++ b/docs/reference/old/node.md @@ -0,0 +1,10 @@ +--- +title: Node +--- + +!!! 
warning + This class is superseded by [pyslurm.Node](../node.md) and will be + removed in a future release. + +::: pyslurm.node + handler: python diff --git a/docs/reference/partition.md b/docs/reference/partition.md new file mode 100644 index 00000000..6ab4b865 --- /dev/null +++ b/docs/reference/partition.md @@ -0,0 +1,10 @@ +--- +title: Partition +--- + +!!! warning + This API is currently being completely reworked, and is subject to be + removed in the future when a replacement is introduced + +::: pyslurm.partition + handler: python diff --git a/docs/reference/reservation.md b/docs/reference/reservation.md new file mode 100644 index 00000000..563e29db --- /dev/null +++ b/docs/reference/reservation.md @@ -0,0 +1,10 @@ +--- +title: Reservation +--- + +!!! warning + This API is currently being completely reworked, and is subject to be + removed in the future when a replacement is introduced + +::: pyslurm.reservation + handler: python diff --git a/docs/reference/statistics.md b/docs/reference/statistics.md new file mode 100644 index 00000000..1f2b2e37 --- /dev/null +++ b/docs/reference/statistics.md @@ -0,0 +1,10 @@ +--- +title: Statistics +--- + +!!! warning + This API is currently being completely reworked, and is subject to be + removed in the future when a replacement is introduced + +::: pyslurm.statistics + handler: python diff --git a/docs/reference/topology.md b/docs/reference/topology.md new file mode 100644 index 00000000..1cb107a1 --- /dev/null +++ b/docs/reference/topology.md @@ -0,0 +1,10 @@ +--- +title: Topology +--- + +!!! warning + This API is currently being completely reworked, and is subject to be + removed in the future when a replacement is introduced + +::: pyslurm.topology + handler: python diff --git a/docs/reference/trigger.md b/docs/reference/trigger.md new file mode 100644 index 00000000..308a3e3f --- /dev/null +++ b/docs/reference/trigger.md @@ -0,0 +1,10 @@ +--- +title: Trigger +--- + +!!! 
warning + This API is currently being completely reworked, and is subject to be + removed in the future when a replacement is introduced + +::: pyslurm.trigger + handler: python diff --git a/docs/reference/utilities.md b/docs/reference/utilities.md new file mode 100644 index 00000000..b290d055 --- /dev/null +++ b/docs/reference/utilities.md @@ -0,0 +1,39 @@ +--- +title: Utilities +--- + +::: pyslurm.utils + handler: python + +::: pyslurm.utils.timestr_to_secs + handler: python + +::: pyslurm.utils.timestr_to_mins + handler: python + +::: pyslurm.utils.secs_to_timestr + handler: python + +::: pyslurm.utils.mins_to_timestr + handler: python + +::: pyslurm.utils.date_to_timestamp + handler: python + +::: pyslurm.utils.timestamp_to_date + handler: python + +::: pyslurm.utils.expand_range_str + handler: python + +::: pyslurm.utils.humanize + handler: python + +::: pyslurm.utils.dehumanize + handler: python + +::: pyslurm.utils.nodelist_from_range_str + handler: python + +::: pyslurm.utils.nodelist_to_range_str + handler: python diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css index 18fe35d6..9562d9be 100644 --- a/docs/stylesheets/extra.css +++ b/docs/stylesheets/extra.css @@ -1,4 +1,4 @@ /* Maximum space for text block */ .md-grid { - max-width: 70%; + max-width: 75%; } diff --git a/docs/pyslurm-docs.png b/logo.png similarity index 100% rename from docs/pyslurm-docs.png rename to logo.png diff --git a/mkdocs.yml b/mkdocs.yml index 56aa68c6..bd62c384 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,17 +1,57 @@ -site_name: pyslurm +site_dir: "site" +site_name: "pyslurm" +site_url: "https://pyslurm.github.io" +repo_url: "https://github.com/PySlurm/pyslurm" +repo_name: "PySlurm/pyslurm" +copyright: Copyright © 2023 The PySlurm Authors + theme: name: "material" - logo: pyslurm-docs.png + logo: logo.png + features: + - navigation.sections + - navigation.indexes + - navigation.tabs + # - navigation.tabs.sticky + - navigation.top + - content.code.copy + - 
toc.follow + palette: + - media: "(prefers-color-scheme: light)" + scheme: default + accent: purple + toggle: + icon: material/brightness-7 + name: Switch to dark mode + - media: "(prefers-color-scheme: dark)" + scheme: slate + toggle: + icon: material/brightness-4 + name: Switch to light mode + font: + text: Roboto + code: Roboto Mono + plugins: -- search -- mkdocstrings: - handlers: - python: - options: - filters: ["!^_"] - docstring_style: google - show_signature: true - show_root_heading: true + - search + - awesome-pages + - mike + - mkdocstrings: + handlers: + python: + import: + - https://docs.python.org/3/objects.inv + options: + filters: ["!^_"] + docstring_style: google + show_signature: true + show_root_heading: true + +markdown_extensions: + - admonition + - pymdownx.snippets: + check_paths: true + extra: version: provider: mike diff --git a/pyslurm/__init__.py b/pyslurm/__init__.py index aa9e26c6..750199da 100644 --- a/pyslurm/__init__.py +++ b/pyslurm/__init__.py @@ -1,10 +1,6 @@ -""" -PySlurm: Python bindings for the Slurm C API -============================================ - -PySlurm is a Cython wrapper around Slurm C API functions. +"""pyslurm package -More information about Slurm can be found at https://slurm.schedmd.com. +pyslurm is a wrapper around the Slurm C-API. 
""" from __future__ import absolute_import @@ -16,6 +12,9 @@ from .pyslurm import * from .__version__ import __version__ +from pyslurm import utils +from pyslurm import db + from pyslurm.core.job import ( Job, Jobs, @@ -23,38 +22,12 @@ JobSteps, JobSubmitDescription, ) - -from pyslurm.core import db from pyslurm.core.node import Node, Nodes - -import pyslurm.core.error +from pyslurm.core import error from pyslurm.core.error import ( + PyslurmError, RPCError, ) - -# Utility time functions -from pyslurm.core.common.ctime import ( - timestr_to_secs, - timestr_to_mins, - secs_to_timestr, - mins_to_timestr, - date_to_timestamp, - timestamp_to_date, -) - -# General utility functions -from pyslurm.core.common import ( - uid_to_name, - gid_to_name, - user_to_uid, - group_to_gid, - expand_range_str, - humanize, - dehumanize, - nodelist_from_range_str, - nodelist_to_range_str, -) - from pyslurm.core import slurmctld # Initialize slurm api diff --git a/pyslurm/api.pxd b/pyslurm/api.pxd index 9b19ec9a..b780fdba 100644 --- a/pyslurm/api.pxd +++ b/pyslurm/api.pxd @@ -23,4 +23,4 @@ # cython: language_level=3 from pyslurm cimport slurm -from pyslurm.core.common cimport cstr +from pyslurm.utils cimport cstr diff --git a/pyslurm/core/error.pyx b/pyslurm/core/error.pyx index 69130abd..a5924d08 100644 --- a/pyslurm/core/error.pyx +++ b/pyslurm/core/error.pyx @@ -22,7 +22,7 @@ # cython: c_string_type=unicode, c_string_encoding=default # cython: language_level=3 -from pyslurm.core.common cimport cstr +from pyslurm.utils cimport cstr from pyslurm cimport slurm from pyslurm.slurm cimport slurm_get_errno @@ -65,7 +65,11 @@ def get_last_slurm_error(): return (errno, slurm_strerror(errno)) -class RPCError(Exception): +class PyslurmError(Exception): + """The base Exception for all Pyslurm errors.""" + + +class RPCError(PyslurmError): """Exception for handling Slurm RPC errors. Args: @@ -75,6 +79,13 @@ class RPCError(Exception): msg (str): An optional, custom error description. 
If this is set, the errno will not be translated to its string representation. + + Examples: + >>> import pyslurm + ... try: + ... myjob = pyslurm.Job.load(9999) + ... except pyslurm.RPCError as e: + ... print("Loading the Job failed") """ def __init__(self, errno=slurm.SLURM_ERROR, msg=None): self.msg = msg diff --git a/pyslurm/core/job/job.pxd b/pyslurm/core/job/job.pxd index c41c8ced..5e8dfd4f 100644 --- a/pyslurm/core/job/job.pxd +++ b/pyslurm/core/job/job.pxd @@ -22,9 +22,9 @@ # cython: c_string_type=unicode, c_string_encoding=default # cython: language_level=3 -from pyslurm.core.common cimport cstr, ctime -from pyslurm.core.common.uint cimport * -from pyslurm.core.common.ctime cimport time_t +from pyslurm.utils cimport cstr, ctime +from pyslurm.utils.uint cimport * +from pyslurm.utils.ctime cimport time_t from libc.string cimport memcpy, memset from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t, int64_t @@ -71,9 +71,9 @@ cdef class Jobs(dict): """A collection of Job objects. Args: - jobs (Union[list, dict], optional): + jobs (Union[list, dict], optional=None): Jobs to initialize this collection with. - freeze (bool, optional): + frozen (bool, optional=False): Control whether this collection is "frozen" when reloading Job information. @@ -89,7 +89,7 @@ cdef class Jobs(dict): Total amount of CPU-Time used by all the Jobs in the collection. This is the result of multiplying the run_time with the amount of cpus for each job. - freeze (bool): + frozen (bool): If this is set to True and the reload() method is called, then *ONLY* Jobs that already exist in this collection will be reloaded. 
New Jobs that are discovered will not be added to this @@ -103,7 +103,7 @@ cdef class Jobs(dict): slurm_job_info_t tmp_info cdef public: - freeze + frozen cdef class Job: diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index 1e160c80..7f5beae3 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -31,16 +31,16 @@ from os import WIFSIGNALED, WIFEXITED, WTERMSIG, WEXITSTATUS import re from typing import Union -from pyslurm.core.common import cstr, ctime -from pyslurm.core.common.uint import * +from pyslurm.utils import cstr, ctime +from pyslurm.utils.uint import * from pyslurm.core.job.util import * from pyslurm.core.error import ( RPCError, verify_rpc, slurm_errno, ) -from pyslurm.core.common.ctime import _raw_time -from pyslurm.core.common import ( +from pyslurm.utils.ctime import _raw_time +from pyslurm.utils.helpers import ( uid_to_name, gid_to_name, signal_to_num, @@ -59,8 +59,8 @@ cdef class Jobs(dict): def __dealloc__(self): slurm_free_job_info_msg(self.info) - def __init__(self, jobs=None, freeze=False): - self.freeze = freeze + def __init__(self, jobs=None, frozen=False): + self.frozen = frozen if isinstance(jobs, dict): self.update(jobs) @@ -72,7 +72,7 @@ cdef class Jobs(dict): self[job.id] = job @staticmethod - def load(preload_passwd_info=False, freeze=False): + def load(preload_passwd_info=False, frozen=False): """Retrieve all Jobs from the Slurm controller Args: @@ -83,11 +83,11 @@ cdef class Jobs(dict): where a UID/GID is translated to a name. If True, the information will fetched and stored in each of the Job instances. - freeze (bool, optional): - Decide whether this collection of Jobs should be "frozen". + frozen (bool, optional): + Decide whether this collection of Jobs should be frozen. Returns: - (Jobs): A collection of Job objects. + (pyslurm.Jobs): A collection of Job objects. Raises: RPCError: When getting all the Jobs from the slurmctld failed. @@ -134,7 +134,7 @@ cdef class Jobs(dict): # instance. 
jobs.info.record_count = 0 - jobs.freeze = freeze + jobs.frozen = frozen return jobs def reload(self): @@ -149,12 +149,12 @@ cdef class Jobs(dict): if jid in reloaded_jobs: # Put the new data in. self[jid] = reloaded_jobs[jid] - elif not self.freeze: + elif not self.frozen: # Remove this instance from the current collection, as the Job # doesn't exist anymore. del self[jid] - if not self.freeze: + if not self.frozen: for jid in reloaded_jobs: if jid not in self: self[jid] = reloaded_jobs[jid] @@ -167,8 +167,8 @@ cdef class Jobs(dict): This function fills in the "steps" attribute for all Jobs in the collection. - Note: - Pending Jobs will be ignored, since they don't have any Steps yet. + Note: Pending Jobs will be ignored, since they don't have any Steps + yet. Raises: RPCError: When retrieving the Job information for all the Steps @@ -380,8 +380,8 @@ cdef class Job: RPCError: When cancelling the Job was not successful. Examples: - >>> from pyslurm import Job - >>> Job(9999).cancel() + >>> import pyslurm + >>> pyslurm.Job(9999).cancel() """ self.send_signal(9) @@ -394,8 +394,8 @@ cdef class Job: RPCError: When suspending the Job was not successful. Examples: - >>> from pyslurm import Job - >>> Job(9999).suspend() + >>> import pyslurm + >>> pyslurm.Job(9999).suspend() """ # TODO: Report as a misbehaviour to schedmd that slurm_suspend is not # correctly returning error code when it cannot find the job in @@ -413,8 +413,8 @@ cdef class Job: RPCError: When unsuspending the Job was not successful. Examples: - >>> from pyslurm import Jobs - >>> Job(9999).unsuspend() + >>> import pyslurm + >>> pyslurm.Job(9999).unsuspend() """ # Same problem as described in suspend() verify_rpc(slurm_resume(self.id)) @@ -425,7 +425,7 @@ cdef class Job: Implements the slurm_update_job RPC. Args: - changes (JobSubmitDescription): + changes (pyslurm.JobSubmitDescription): A JobSubmitDescription object which contains all the modifications that should be done on the Job. 
@@ -433,11 +433,11 @@ cdef class Job: RPCError: When updating the Job was not successful. Examples: - >>> from pyslurm import Job, JobSubmitDescription + >>> import pyslurm >>> >>> # Setting the new time-limit to 20 days - >>> changes = JobSubmitDescription(time_limit="20-00:00:00") - >>> Job(9999).modify(changes) + >>> changes = pyslurm.JobSubmitDescription(time_limit="20-00:00:00") + >>> pyslurm.Job(9999).modify(changes) """ changes._create_job_submit_desc(is_update=True) changes.ptr.job_id = self.id @@ -454,20 +454,19 @@ cdef class Job: release the Job again. If you specify the mode as "user", the User will also be able to release the job. - Note: - Uses the modify() function to set the Job's priority to 0. + Note: Uses the modify() function to set the Job's priority to 0. Raises: RPCError: When holding the Job was not successful. Examples: - >>> from pyslurm import Job + >>> import pyslurm >>> >>> # Holding a Job (in "admin" mode by default) - >>> Job(9999).hold() + >>> pyslurm.Job(9999).hold() >>> >>> # Holding a Job in "user" mode - >>> Job(9999).hold(mode="user") + >>> pyslurm.Job(9999).hold(mode="user") """ cdef JobSubmitDescription job_sub = JobSubmitDescription(priority=0) @@ -479,16 +478,15 @@ cdef class Job: def release(self): """Release a currently held Job, allowing it to be scheduled again. - Note: - Uses the modify() function to reset the priority back to + Note: Uses the modify() function to reset the priority back to be controlled by the slurmctld's priority calculation routine. Raises: RPCError: When releasing a held Job was not successful. Examples: - >>> from pyslurm import Job - >>> Job(9999).release() + >>> import pyslurm + >>> pyslurm.Job(9999).release() """ self.modify(JobSubmitDescription(priority=slurm.INFINITE)) @@ -498,7 +496,7 @@ cdef class Job: Implements the slurm_requeue RPC. Args: - hold (bool): + hold (bool, optional): Controls whether the Job should be put in a held state or not. 
Default for this is 'False', so it will not be held. @@ -506,14 +504,14 @@ cdef class Job: RPCError: When requeing the Job was not successful. Examples: - >>> from pyslurm import Job + >>> import pyslurm >>> >>> # Requeing a Job while allowing it to be >>> # scheduled again immediately - >>> Job(9999).requeue() + >>> pyslurm.Job(9999).requeue() >>> >>> # Requeing a Job while putting it in a held state - >>> Job(9999).requeue(hold=True) + >>> pyslurm.Job(9999).requeue(hold=True) """ cdef uint32_t flags = 0 @@ -535,28 +533,24 @@ cdef class Job: RPCError: When sending the message to the Job was not successful. Examples: - >>> from pyslurm import Job - >>> Job(9999).notify("Hello Friends!") + >>> import pyslurm + >>> pyslurm.Job(9999).notify("Hello Friends!") """ verify_rpc(slurm_notify_job(self.id, msg)) def get_batch_script(self): """Return the content of the script for a Batch-Job. - Note: - The string returned also includes all the "\n" characters - (new-line). - Returns: (str): The content of the batch script. Raises: RPCError: When retrieving the Batch-Script for the Job was not - successful. + successful. Examples: - >>> from pyslurm import Job - >>> script = Job(9999).get_batch_script() + >>> import pyslurm + >>> script = pyslurm.Job(9999).get_batch_script() """ # The code for this function was taken from here: # https://github.com/SchedMD/slurm/blob/7162f15af8deaf02c3bbf940d59e818cdeb5c69d/src/api/job_info.c#L1319 @@ -1229,13 +1223,14 @@ cdef class Job: def get_resource_layout_per_node(self): """Retrieve the resource layout of this Job on each node. - This contains the following information: + The dict returned contains the following information for each node: * cpu_ids (str) * gres (dict) * memory (int) Returns: - (dict): Resource layout + (dict): Resource layout, where the key is the name of the name and + its value another dict with the components described above. 
""" # The code for this function is a modified reimplementation from here: # https://github.com/SchedMD/slurm/blob/d525b6872a106d32916b33a8738f12510ec7cf04/src/api/job_info.c#L739 diff --git a/pyslurm/core/job/sbatch_opts.pyx b/pyslurm/core/job/sbatch_opts.pyx index 91724d29..c6e0b400 100644 --- a/pyslurm/core/job/sbatch_opts.pyx +++ b/pyslurm/core/job/sbatch_opts.pyx @@ -184,8 +184,8 @@ def _find_opt(opt): def _parse_opts_from_batch_script(desc, script, overwrite): flags_and_vals = {} - if not script or not Path(script).is_file(): - return None + if not Path(script).is_file(): + raise ValueError("The script path you provided is not valid.") script = Path(script).read_text() for line in script.splitlines(): diff --git a/pyslurm/core/job/step.pxd b/pyslurm/core/job/step.pxd index 4cdd6c49..ed61d4d9 100644 --- a/pyslurm/core/job/step.pxd +++ b/pyslurm/core/job/step.pxd @@ -24,7 +24,7 @@ from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t from .job cimport Job - +from libc.string cimport memcpy, memset from pyslurm cimport slurm from pyslurm.slurm cimport ( job_step_info_t, @@ -43,9 +43,14 @@ from pyslurm.slurm cimport ( xfree, try_xmalloc, ) +from pyslurm.utils cimport cstr, ctime +from pyslurm.utils.uint cimport * +from pyslurm.utils.ctime cimport time_t +from pyslurm.core.job.task_dist cimport TaskDistribution + cdef class JobSteps(dict): - """A collection of :obj:`JobStep` objects for a given Job. + """A collection of [`pyslurm.JobStep`][] objects for a given Job. Args: job (Union[Job, int]): @@ -70,11 +75,15 @@ cdef class JobStep: """A Slurm Jobstep Args: - job (Union[Job, int]): + job_id (Union[Job, int], optional=0): The Job this Step belongs to. - step (Union[int, str]): + step_id (Union[int, str], optional=0): Step-ID for this JobStep object. + Other Parameters: + time_limit (int): + Time limit in Minutes for this step. + Raises: MemoryError: If malloc fails to allocate memory. 
diff --git a/pyslurm/core/job/step.pyx b/pyslurm/core/job/step.pyx index d84330b1..4b05aa5b 100644 --- a/pyslurm/core/job/step.pyx +++ b/pyslurm/core/job/step.pyx @@ -22,22 +22,17 @@ # cython: c_string_type=unicode, c_string_encoding=default # cython: language_level=3 -from libc.string cimport memcpy, memset -from pyslurm.core.common cimport cstr, ctime -from pyslurm.core.common import cstr, ctime -from pyslurm.core.common.uint cimport * -from pyslurm.core.common.uint import * -from pyslurm.core.common.ctime cimport time_t +from typing import Union +from pyslurm.utils import cstr, ctime +from pyslurm.utils.uint import * from pyslurm.core.error import RPCError, verify_rpc -from pyslurm.core.common import ( +from pyslurm.utils.helpers import ( signal_to_num, instance_to_dict, uid_to_name, ) from pyslurm.core.job.util import cpu_freq_int_to_str -from pyslurm.core.job.task_dist cimport TaskDistribution - -from pyslurm.core.common.ctime import ( +from pyslurm.utils.ctime import ( secs_to_timestr, mins_to_timestr, timestr_to_mins, @@ -59,6 +54,15 @@ cdef class JobSteps(dict): @staticmethod def load(job): + """Load the Steps for a specific Job + + Args: + job (Union[Job, int]): + The Job for which the Steps should be loaded + + Returns: + (pyslurm.JobSteps): JobSteps of the Job + """ cdef Job _job _job = Job.load(job.id) if isinstance(job, Job) else Job.load(job) return JobSteps._load(_job) @@ -187,7 +191,7 @@ cdef class JobStep: Implements the slurm_get_job_steps RPC. Args: - job_id (Union[Job, int]): + job_id (Union[pyslurm.Job, int]): ID of the Job the Step belongs to. step_id (Union[int, str]): Step-ID for the Step to be loaded. 
@@ -249,12 +253,12 @@ cdef class JobStep: Examples: Specifying the signal as a string: - >>> from pyslurm import JobStep - >>> JobStep(9999, 1).send_signal("SIGUSR1") + >>> import pyslurm + >>> pyslurm.JobStep(9999, 1).send_signal("SIGUSR1") or passing in a numeric signal: - >>> JobStep(9999, 1).send_signal(9) + >>> pyslurm.JobStep(9999, 1).send_signal(9) """ step_id = self.ptr.step_id.step_id sig = signal_to_num(signal) @@ -269,54 +273,37 @@ cdef class JobStep: RPCError: When cancelling the Job was not successful. Examples: - >>> from pyslurm import JobStep - >>> JobStep(9999, 1).cancel() + >>> import pyslurm + >>> pyslurm.JobStep(9999, 1).cancel() """ step_id = self.ptr.step_id.step_id verify_rpc(slurm_kill_job_step(self.job_id, step_id, 9)) - def modify(self, step=None, **kwargs): + def modify(self, changes): """Modify a job step. Implements the slurm_update_step RPC. Args: - step (JobStep): + changes (pyslurm.JobStep): Another JobStep object which contains all the changes that should be applied to this instance. - **kwargs: - You can also specify all the changes as keyword arguments. - Allowed values are only attributes which can actually be set - on a JobStep instance. If a step is explicitly specified as - parameter, all **kwargs will be ignored. - Raises: RPCError: When updating the JobStep was not successful. Examples: - >>> from pyslurm import JobStep + >>> import pyslurm >>> >>> # Setting the new time-limit to 20 days - >>> changes = JobStep(time_limit="20-00:00:00") - >>> JobStep(9999, 1).modify(changes) - >>> - >>> # Or by specifying the changes directly to the modify function - >>> JobStep(9999, 1).modify(time_limit="20-00:00:00") + >>> changes = pyslurm.JobStep(time_limit="20-00:00:00") + >>> pyslurm.JobStep(9999, 1).modify(changes) """ - cdef JobStep js = self - - # Allow the user to both specify changes via object and **kwargs. 
- if step and isinstance(step, JobStep): - js = step - elif kwargs: - js = JobStep(**kwargs) - + cdef JobStep js = changes js._alloc_umsg() js.umsg.step_id = self.ptr.step_id.step_id js.umsg.job_id = self.ptr.step_id.job_id verify_rpc(slurm_update_step(js.umsg)) - def as_dict(self): """JobStep information formatted as a dictionary. @@ -450,6 +437,7 @@ def humanize_step_id(sid): else: return sid + def dehumanize_step_id(sid): if sid == "batch": return slurm.SLURM_BATCH_SCRIPT diff --git a/pyslurm/core/job/submission.pxd b/pyslurm/core/job/submission.pxd index ebf0b0c5..f9c9d877 100644 --- a/pyslurm/core/job/submission.pxd +++ b/pyslurm/core/job/submission.pxd @@ -38,10 +38,18 @@ from pyslurm.slurm cimport ( xfree, try_xmalloc, ) +from pyslurm.utils cimport cstr, ctime +from pyslurm.utils.uint cimport * +from pyslurm.utils.ctime cimport time_t +from pyslurm.core.job.task_dist cimport TaskDistribution cdef class JobSubmitDescription: - """Description of a Slurm Job. + """Submit Description for a Slurm Job. 
+ + Args: + **kwargs (Any, optional=None): + Any valid Attribute this object has Attributes: name (str): diff --git a/pyslurm/core/job/submission.pyx b/pyslurm/core/job/submission.pyx index e1f4039d..f023f035 100644 --- a/pyslurm/core/job/submission.pyx +++ b/pyslurm/core/job/submission.pyx @@ -25,18 +25,15 @@ from os import getcwd from os import environ as pyenviron import re -import typing +from typing import Union, Any import shlex from pathlib import Path -from pyslurm.core.common cimport cstr, ctime -from pyslurm.core.common import cstr -from pyslurm.core.common.uint cimport * -from pyslurm.core.common.uint import * -from pyslurm.core.common.ctime cimport time_t +from pyslurm.utils import cstr +from pyslurm.utils.uint import * from pyslurm.core.job.util import * from pyslurm.core.error import RPCError, verify_rpc from pyslurm.core.job.sbatch_opts import _parse_opts_from_batch_script -from pyslurm.core.common.ctime import ( +from pyslurm.utils.ctime import ( secs_to_timestr, timestr_to_secs, mins_to_timestr, @@ -44,9 +41,7 @@ from pyslurm.core.common.ctime import ( timestamp_to_date, date_to_timestamp, ) -from pyslurm.core.job.task_dist cimport TaskDistribution - -from pyslurm.core.common import ( +from pyslurm.utils.helpers import ( humanize, dehumanize, signal_to_num, @@ -89,10 +84,11 @@ cdef class JobSubmitDescription: MemoryError: If malloc failed to allocate enough memory. Examples: - >>> desc = JobSubmitDescription( - >>> name="test-job", - >>> cpus_per_task=1, - >>> time_limit="10-00:00:00") + >>> import pyslurm + >>> desc = pyslurm.JobSubmitDescription( + ... name="test-job", + ... cpus_per_task=1, + ... time_limit="10-00:00:00") >>> >>> job_id = desc.submit() """ @@ -112,7 +108,8 @@ cdef class JobSubmitDescription: Args: overwrite (bool): If set to True, the value from an option found in the - environment will override its current value. Default is False + environment will override the current value of the attribute + in this instance. 
Default is False """ self._parse_env(overwrite) @@ -122,8 +119,11 @@ cdef class JobSubmitDescription: Args: overwrite (bool): If set to True, the value from an option found in the in the - batch script will override its current value. Default is False + batch script will override the current value of the attribute + in this instance. Default is False """ + if not self.script: + raise ValueError("You need to set the 'script' attribute first.") _parse_opts_from_batch_script(self, self.script, overwrite) def _parse_env(self, overwrite=False): diff --git a/pyslurm/core/job/task_dist.pxd b/pyslurm/core/job/task_dist.pxd index 5fe76488..4f8a073d 100644 --- a/pyslurm/core/job/task_dist.pxd +++ b/pyslurm/core/job/task_dist.pxd @@ -23,7 +23,7 @@ # cython: language_level=3 from pyslurm cimport slurm -from pyslurm.core.common.uint cimport u16 +from pyslurm.utils.uint cimport u16 from pyslurm.slurm cimport ( task_dist_states_t, ) diff --git a/pyslurm/core/job/util.pyx b/pyslurm/core/job/util.pyx index 7b463b2c..31a31638 100644 --- a/pyslurm/core/job/util.pyx +++ b/pyslurm/core/job/util.pyx @@ -24,8 +24,8 @@ from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t from pyslurm cimport slurm -from pyslurm.core.common.uint import * -from pyslurm.core.common.uint cimport * +from pyslurm.utils.uint import * +from pyslurm.utils.uint cimport * # Note: Maybe consider using libslurmfull again to avoid having to reimplement # some of these functions and keeping track for changes in new releases. 
diff --git a/pyslurm/core/node.pxd b/pyslurm/core/node.pxd index 3f39ece7..412a290c 100644 --- a/pyslurm/core/node.pxd +++ b/pyslurm/core/node.pxd @@ -24,6 +24,7 @@ from libc.string cimport memcpy, memset from pyslurm cimport slurm +from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t from pyslurm.slurm cimport ( node_info_t, node_info_msg_t, @@ -47,14 +48,20 @@ from pyslurm.slurm cimport ( slurm_node_state_string_complete, slurm_node_state_string, cpu_bind_type_t, + xfree, + try_xmalloc, ) +from pyslurm.utils cimport cstr +from pyslurm.utils cimport ctime +from pyslurm.utils.ctime cimport time_t +from pyslurm.utils.uint cimport * cdef class Nodes(dict): """A collection of Node objects. Args: - nodes (Union[list, dict, str], optional): + nodes (Union[list, dict, str], optional=None): Nodes to initialize this collection with. Attributes: @@ -90,21 +97,30 @@ cdef class Node: """A Slurm node. Args: - name (str): + name (str, optional=None): Name of a node - **kwargs: - Any writable property. 
Writable attributes include: - * name - * configured_gres - * address - * hostname - * extra - * comment - * weight - * available_features - * active_features - * cpu_binding - * state + + Other Parameters: + configured_gres (dict): + Configured GRES for the node + address (str): + Address of the node + hostname (str): + Hostname of the node + extra (str): + Arbitrary extra string + comment (str): + Comment for the node + weight (int): + Weight associated to the node + available_features (list): + Available features for the node + active_features (list): + Active features for the node + cpu_binding (str): + Default CPU-Binding for the node + state (str): + State of the node Attributes: name (str): diff --git a/pyslurm/core/node.pyx b/pyslurm/core/node.pyx index 17429ce1..050f6262 100644 --- a/pyslurm/core/node.pyx +++ b/pyslurm/core/node.pyx @@ -22,18 +22,13 @@ # cython: c_string_type=unicode, c_string_encoding=default # cython: language_level=3 -from pyslurm.slurm cimport xfree, try_xmalloc -from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t -from pyslurm.core.common cimport cstr -from pyslurm.core.common import cstr -from pyslurm.core.common cimport ctime -from pyslurm.core.common import ctime -from pyslurm.core.common.ctime cimport time_t -from pyslurm.core.common.uint cimport * -from pyslurm.core.common.uint import * +from typing import Union +from pyslurm.utils import cstr +from pyslurm.utils import ctime +from pyslurm.utils.uint import * from pyslurm.core.error import RPCError, verify_rpc -from pyslurm.core.common.ctime import timestamp_to_date, _raw_time -from pyslurm.core.common import ( +from pyslurm.utils.ctime import timestamp_to_date, _raw_time +from pyslurm.utils.helpers import ( uid_to_name, gid_to_name, humanize, @@ -83,7 +78,7 @@ cdef class Nodes(dict): the Node instances. The default is False. Returns: - (Nodes): Collection of node objects. + (pyslurm.Nodes): Collection of node objects. 
Raises: RPCError: When getting all the Nodes from the slurmctld failed. @@ -136,8 +131,7 @@ cdef class Nodes(dict): def reload(self): """Reload the information for nodes in a collection. - Note: - Only information for nodes which are already in the collection at + Note: Only information for nodes which are already in the collection at the time of calling this method will be reloaded. Raises: @@ -318,21 +312,21 @@ cdef class Node: Implements the slurm_create_node RPC. Args: - future (str, optional): + state (str, optional): An optional state the created Node should have. Allowed values are "future" and "cloud". "future" is the default. Returns: - (Node): This function returns the current Node-instance object - itself. + (pyslurm.Node): This function returns the current Node-instance + object itself. Raises: RPCError: If creating the Node was not successful. MemoryError: If malloc failed to allocate memory. Examples: - >>> from pyslurm import Node - >>> node = Node("testnode").create() + >>> import pyslurm + >>> node = pyslurm.Node("testnode").create() """ if not self.name: raise ValueError("You need to set a node name first.") @@ -344,43 +338,28 @@ cdef class Node: return self - def modify(self, node=None, **kwargs): + def modify(self, changes): """Modify a node. Implements the slurm_update_node RPC. Args: - node (pyslurm.Node): + changes (pyslurm.Node): Another Node object which contains all the changes that should be applied to this instance. - **kwargs: - You can also specify all the changes as keyword arguments. - Allowed values are only attributes which can actually be set - on a Node instance. If a node is explicitly specified as - parameter, all **kwargs will be ignored. Raises: RPCError: When updating the Node was not successful. 
Examples: - >>> from pyslurm import Node - >>> - >>> # Setting a new weight for the Node - >>> changes = Node(weight=100) - >>> Node("localhost").modify(changes) + >>> import pyslurm >>> - >>> # Or by specifying the changes directly to the modify function - >>> Node("localhost").modify(weight=100) + >>> mynode = pyslurm.Node("localhost") + >>> changes = pyslurm.Node(weight=100) + >>> # Setting the weight to 100 for the "localhost" node + >>> mynode.modify(changes) """ - cdef Node n = self - - # Allow the user to both specify changes via a Node instance or - # **kwargs. - if node and isinstance(node, Node): - n = node - elif kwargs: - n = Node(**kwargs) - + cdef Node n = changes n._alloc_umsg() cstr.fmalloc(&n.umsg.node_names, self.name) verify_rpc(slurm_update_node(n.umsg)) @@ -395,8 +374,8 @@ cdef class Node: MemoryError: If malloc failed to allocate memory. Examples: - >>> from pyslurm import Node - >>> Node("localhost").delete() + >>> import pyslurm + >>> pyslurm.Node("localhost").delete() """ self._alloc_umsg() verify_rpc(slurm_delete_node(self.umsg)) @@ -406,6 +385,11 @@ cdef class Node: Returns: (dict): Node information as dict + + Examples: + >>> import pyslurm + >>> mynode = pyslurm.Node.load("mynode") + >>> mynode_dict = mynode.as_dict() """ return instance_to_dict(self) diff --git a/pyslurm/core/slurmctld.pxd b/pyslurm/core/slurmctld.pxd index f65655c8..0f42fffb 100644 --- a/pyslurm/core/slurmctld.pxd +++ b/pyslurm/core/slurmctld.pxd @@ -29,9 +29,9 @@ from pyslurm.slurm cimport ( slurm_free_ctl_conf, try_xmalloc, ) -from pyslurm.core.common cimport cstr +from pyslurm.utils cimport cstr from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t, int64_t -from pyslurm.core.common.uint cimport * +from pyslurm.utils.uint cimport * cdef class Config: diff --git a/pyslurm/core/db/__init__.pxd b/pyslurm/db/__init__.pxd similarity index 100% rename from pyslurm/core/db/__init__.pxd rename to pyslurm/db/__init__.pxd diff --git a/pyslurm/core/db/__init__.py 
b/pyslurm/db/__init__.py similarity index 81% rename from pyslurm/core/db/__init__.py rename to pyslurm/db/__init__.py index a742f72b..bb34e232 100644 --- a/pyslurm/core/db/__init__.py +++ b/pyslurm/db/__init__.py @@ -1,5 +1,5 @@ ######################################################################### -# db/__init__.py - database package __init__ file +# db/__init__.py - pyslurm database api ######################################################################### # Copyright (C) 2023 Toni Harzendorf # @@ -19,18 +19,19 @@ # with PySlurm; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -from pyslurm.core.db.connection import Connection -from pyslurm.core.db.step import JobStep -from pyslurm.core.db.job import ( +from .connection import Connection +from .step import JobStep, JobSteps +from .stats import JobStatistics +from .job import ( Job, Jobs, JobSearchFilter, ) -from pyslurm.core.db.tres import ( +from .tres import ( TrackableResource, TrackableResources, ) -from pyslurm.core.db.qos import ( +from .qos import ( QualitiesOfService, QualityOfService, QualityOfServiceSearchFilter, diff --git a/pyslurm/core/db/connection.pxd b/pyslurm/db/connection.pxd similarity index 100% rename from pyslurm/core/db/connection.pxd rename to pyslurm/db/connection.pxd diff --git a/pyslurm/core/db/connection.pyx b/pyslurm/db/connection.pyx similarity index 86% rename from pyslurm/core/db/connection.pyx rename to pyslurm/db/connection.pyx index ff32dd92..eab6572d 100644 --- a/pyslurm/core/db/connection.pyx +++ b/pyslurm/db/connection.pyx @@ -46,7 +46,11 @@ cdef class Connection: RPCError: When opening the connection fails Returns: - (Connection): Connection to slurmdbd + (pyslurm.db.Connection): Connection to slurmdbd + + Examples: + >>> import pyslurm + >>> connection = pyslurm.db.Connection.open() """ cdef Connection conn = Connection.__new__(Connection) conn.ptr = slurmdb_connection_get(&conn.flags) @@ -56,7 +60,14 
@@ cdef class Connection: return conn def close(self): - """Close the current connection.""" + """Close the current connection. + + Examples: + >>> import pyslurm + >>> connection = pyslurm.db.Connection.open() + >>> ... + >>> connection.close() + """ if self.is_open: slurmdb_connection_close(&self.ptr) self.ptr = NULL diff --git a/pyslurm/core/db/job.pxd b/pyslurm/db/job.pxd similarity index 94% rename from pyslurm/core/db/job.pxd rename to pyslurm/db/job.pxd index 2b220a05..08673682 100644 --- a/pyslurm/core/db/job.pxd +++ b/pyslurm/db/job.pxd @@ -40,23 +40,24 @@ from pyslurm.slurm cimport ( slurm_job_state_string, slurm_job_reason_string, ) -from pyslurm.core.db.util cimport ( +from pyslurm.db.util cimport ( SlurmList, SlurmListItem, make_char_list, ) -from pyslurm.core.db.step cimport JobStep, JobSteps -from pyslurm.core.db.stats cimport JobStats -from pyslurm.core.db.connection cimport Connection -from pyslurm.core.common cimport cstr -from pyslurm.core.db.qos cimport QualitiesOfService +from pyslurm.db.step cimport JobStep, JobSteps +from pyslurm.db.stats cimport JobStatistics +from pyslurm.db.connection cimport Connection +from pyslurm.utils cimport cstr +from pyslurm.db.qos cimport QualitiesOfService +from pyslurm.db.tres cimport TrackableResources, TrackableResource cdef class JobSearchFilter: """Search conditions for Slurm database Jobs. Args: - **kwargs: + **kwargs (Any, optional=None): Any valid attribute of the object. Attributes: @@ -148,7 +149,7 @@ cdef class JobSearchFilter: cdef class Jobs(dict): - """A collection of Database Jobs.""" + """A collection of [`pyslurm.db.Job`][] objects.""" cdef: SlurmList info Connection db_conn @@ -158,7 +159,7 @@ cdef class Job: """A Slurm Database Job. Args: - job_id (int): + job_id (int, optional=0): An Integer representing a Job-ID. 
Raises: @@ -167,7 +168,7 @@ cdef class Job: Attributes: steps (pyslurm.db.JobSteps): Steps this Job has - stats (pyslurm.db.JobStats): + stats (pyslurm.db.JobStatistics): Utilization statistics of this Job account (str): Account of the Job. @@ -273,7 +274,7 @@ cdef class Job: cdef public: JobSteps steps - JobStats stats + JobStatistics stats @staticmethod cdef Job from_ptr(slurmdb_job_rec_t *in_ptr) diff --git a/pyslurm/core/db/job.pyx b/pyslurm/db/job.pyx similarity index 92% rename from pyslurm/core/db/job.pyx rename to pyslurm/db/job.pyx index d66f789e..50fd0c8c 100644 --- a/pyslurm/core/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -23,16 +23,17 @@ # cython: language_level=3 from os import WIFSIGNALED, WIFEXITED, WTERMSIG, WEXITSTATUS +from typing import Union from pyslurm.core.error import RPCError -from pyslurm.core.db.tres cimport TrackableResources, TrackableResource from pyslurm.core import slurmctld -from pyslurm.core.common.uint import * -from pyslurm.core.common.ctime import ( +from typing import Any +from pyslurm.utils.uint import * +from pyslurm.utils.ctime import ( date_to_timestamp, timestr_to_mins, _raw_time, ) -from pyslurm.core.common import ( +from pyslurm.utils.helpers import ( gid_to_name, group_to_gid, user_to_uid, @@ -192,7 +193,7 @@ cdef class JobSearchFilter: cdef class Jobs(dict): - def __init__(self, *args, **kwargs): + def __init__(self): # TODO: ability to initialize with existing job objects pass @@ -210,6 +211,21 @@ cdef class Jobs(dict): Raises: RPCError: When getting the Jobs from the Database was not sucessful + + Examples: + Without a Filter the default behaviour applies, which is + simply retrieving all Jobs from the same day: + + >>> import pyslurm + >>> db_jobs = pyslurm.db.Jobs.load() + + Now with a Job Filter, so only Jobs that have specific Accounts + are returned: + + >>> import pyslurm + >>> accounts = ["acc1", "acc2"] + >>> search_filter = pyslurm.db.JobSearchFilter(accounts=accounts) + >>> db_jobs = 
pyslurm.db.Jobs.load(search_filter) """ cdef: Jobs jobs = Jobs() @@ -245,7 +261,7 @@ cdef class Jobs(dict): job = Job.from_ptr(job_ptr.data) job.qos_data = qos_data job._create_steps() - JobStats._sum_step_stats_for_job(job, job.steps) + JobStatistics._sum_step_stats_for_job(job, job.steps) jobs[job.id] = job return jobs @@ -256,7 +272,7 @@ cdef class Job: def __cinit__(self): self.ptr = NULL - def __init__(self, job_id): + def __init__(self, job_id=0): self._alloc_impl() self.ptr.jobid = int(job_id) @@ -279,7 +295,7 @@ cdef class Job: cdef Job wrap = Job.__new__(Job) wrap.ptr = in_ptr wrap.steps = JobSteps.__new__(JobSteps) - wrap.stats = JobStats() + wrap.stats = JobStatistics() return wrap @staticmethod @@ -291,11 +307,24 @@ cdef class Job: ID of the Job to be loaded. Returns: - (pyslurm.db.Job): Returns a new Job instance + (pyslurm.Job): Returns a new Database Job instance Raises: RPCError: If requesting the information for the database Job was not sucessful. + + Examples: + >>> import pyslurm + >>> db_job = pyslurm.db.Job.load(10000) + + In the above example, attribute like "script" and "environment" + are not populated. 
You must explicitly request one of them to be + loaded: + + >>> import pyslurm + >>> db_job = pyslurm.db.Job.load(10000, with_script=True) + >>> print(db_job.script) + """ jfilter = JobSearchFilter(ids=[int(job_id)], with_script=with_script, with_env=with_env) @@ -321,6 +350,11 @@ cdef class Job: Returns: (dict): Database Job information as dict + + Examples: + >>> import pyslurm + >>> myjob = pyslurm.db.Job.load(10000) + >>> myjob_dict = myjob.as_dict() """ cdef dict out = instance_to_dict(self) diff --git a/pyslurm/core/db/qos.pxd b/pyslurm/db/qos.pxd similarity index 93% rename from pyslurm/core/db/qos.pxd rename to pyslurm/db/qos.pxd index 3ba59dc6..b2b0bcf9 100644 --- a/pyslurm/core/db/qos.pxd +++ b/pyslurm/db/qos.pxd @@ -32,13 +32,13 @@ from pyslurm.slurm cimport ( slurm_preempt_mode_num, try_xmalloc, ) -from pyslurm.core.db.util cimport ( +from pyslurm.db.util cimport ( SlurmList, SlurmListItem, make_char_list, ) -from pyslurm.core.db.connection cimport Connection -from pyslurm.core.common cimport cstr +from pyslurm.db.connection cimport Connection +from pyslurm.utils cimport cstr cdef class QualitiesOfService(dict): diff --git a/pyslurm/core/db/qos.pyx b/pyslurm/db/qos.pyx similarity index 97% rename from pyslurm/core/db/qos.pyx rename to pyslurm/db/qos.pyx index bd5a35de..2851587e 100644 --- a/pyslurm/core/db/qos.pyx +++ b/pyslurm/db/qos.pyx @@ -23,9 +23,7 @@ # cython: language_level=3 from pyslurm.core.error import RPCError -from pyslurm.core.common import ( - instance_to_dict, -) +from pyslurm.utils.helpers import instance_to_dict cdef class QualitiesOfService(dict): @@ -163,7 +161,7 @@ cdef class QualityOfService: Name of the Quality of Service to be loaded. Returns: - (pyslurm.db.QualityOfService): Returns a new QualityOfService + (QualityOfService): Returns a new QualityOfService instance. 
Raises: diff --git a/pyslurm/core/db/stats.pxd b/pyslurm/db/stats.pxd similarity index 95% rename from pyslurm/core/db/stats.pxd rename to pyslurm/db/stats.pxd index 1f321ab2..682143e6 100644 --- a/pyslurm/core/db/stats.pxd +++ b/pyslurm/db/stats.pxd @@ -28,13 +28,13 @@ from pyslurm.slurm cimport ( slurmdb_stats_t, slurmdb_job_rec_t, ) -from pyslurm.core.db.tres cimport TrackableResources -from pyslurm.core.db.step cimport JobStep, JobSteps -from pyslurm.core.db.job cimport Job -from pyslurm.core.common cimport cstr +from pyslurm.db.tres cimport TrackableResources +from pyslurm.db.step cimport JobStep, JobSteps +from pyslurm.db.job cimport Job +from pyslurm.utils cimport cstr -cdef class JobStats: +cdef class JobStatistics: """Statistics for a Slurm Job or Step. Note: @@ -139,5 +139,5 @@ cdef class JobStats: system_cpu_time @staticmethod - cdef JobStats from_step(JobStep step) + cdef JobStatistics from_step(JobStep step) diff --git a/pyslurm/core/db/stats.pyx b/pyslurm/db/stats.pyx similarity index 97% rename from pyslurm/core/db/stats.pyx rename to pyslurm/db/stats.pyx index bd6606a0..3ae0c8b5 100644 --- a/pyslurm/core/db/stats.pyx +++ b/pyslurm/db/stats.pyx @@ -22,13 +22,13 @@ # cython: c_string_type=unicode, c_string_encoding=default # cython: language_level=3 -from pyslurm.core.common import ( +from pyslurm.utils.helpers import ( nodelist_from_range_str, instance_to_dict, ) -cdef class JobStats: +cdef class JobStatistics: def __init__(self): for attr, val in instance_to_dict(self).items(): @@ -51,8 +51,8 @@ cdef class JobStats: return instance_to_dict(self) @staticmethod - cdef JobStats from_step(JobStep step): - cdef JobStats wrap = JobStats() + cdef JobStatistics from_step(JobStep step): + cdef JobStatistics wrap = JobStatistics() if not &step.ptr.stats: return wrap @@ -143,8 +143,8 @@ cdef class JobStats: @staticmethod def _sum_step_stats_for_job(Job job, JobSteps steps): cdef: - JobStats job_stats = job.stats - JobStats step_stats = None + JobStatistics 
job_stats = job.stats + JobStatistics step_stats = None for step in steps.values(): step_stats = step.stats diff --git a/pyslurm/core/db/step.pxd b/pyslurm/db/step.pxd similarity index 88% rename from pyslurm/core/db/step.pxd rename to pyslurm/db/step.pxd index 77d45cd2..d13e3da7 100644 --- a/pyslurm/core/db/step.pxd +++ b/pyslurm/db/step.pxd @@ -36,21 +36,23 @@ from pyslurm.slurm cimport ( slurm_job_state_string, slurm_job_reason_string, ) -from pyslurm.core.db.util cimport SlurmList, SlurmListItem -from pyslurm.core.db.connection cimport Connection -from pyslurm.core.common cimport cstr -from pyslurm.core.db.stats cimport JobStats +from pyslurm.db.util cimport SlurmList, SlurmListItem +from pyslurm.db.connection cimport Connection +from pyslurm.utils cimport cstr +from pyslurm.db.stats cimport JobStatistics +from pyslurm.db.tres cimport TrackableResources, TrackableResource cdef class JobSteps(dict): + """A collection of [`pyslurm.db.JobStep`][] objects""" pass cdef class JobStep: - """A Slurm Database Job-step. + """A Slurm Database JobStep. 
Attributes: - stats (pyslurm.db.JobStats): + stats (pyslurm.db.JobStatistics): Utilization statistics for this Step num_nodes (int): Amount of nodes this Step has allocated @@ -94,7 +96,7 @@ cdef class JobStep: Amount of seconds the Step was suspended """ cdef slurmdb_step_rec_t *ptr - cdef public JobStats stats + cdef public JobStatistics stats @staticmethod cdef JobStep from_ptr(slurmdb_step_rec_t *step) diff --git a/pyslurm/core/db/step.pyx b/pyslurm/db/step.pyx similarity index 95% rename from pyslurm/core/db/step.pyx rename to pyslurm/db/step.pyx index aa1bd612..6e33c8d1 100644 --- a/pyslurm/core/db/step.pyx +++ b/pyslurm/db/step.pyx @@ -24,10 +24,10 @@ from os import WIFSIGNALED, WIFEXITED, WTERMSIG, WEXITSTATUS from pyslurm.core.error import RPCError -from pyslurm.core.db.tres cimport TrackableResources, TrackableResource -from pyslurm.core.common.uint import * -from pyslurm.core.common.ctime import _raw_time -from pyslurm.core.common import ( +from typing import Union +from pyslurm.utils.uint import * +from pyslurm.utils.ctime import _raw_time +from pyslurm.utils.helpers import ( gid_to_name, uid_to_name, instance_to_dict, @@ -53,7 +53,7 @@ cdef class JobStep: cdef JobStep from_ptr(slurmdb_step_rec_t *step): cdef JobStep wrap = JobStep.__new__(JobStep) wrap.ptr = step - wrap.stats = JobStats.from_step(wrap) + wrap.stats = JobStatistics.from_step(wrap) return wrap def as_dict(self): diff --git a/pyslurm/core/db/tres.pxd b/pyslurm/db/tres.pxd similarity index 97% rename from pyslurm/core/db/tres.pxd rename to pyslurm/db/tres.pxd index f08bb3df..40d28799 100644 --- a/pyslurm/core/db/tres.pxd +++ b/pyslurm/db/tres.pxd @@ -21,7 +21,7 @@ # cython: language_level=3 from pyslurm cimport slurm -from pyslurm.core.common cimport cstr +from pyslurm.utils cimport cstr from libc.stdint cimport uint64_t from pyslurm.slurm cimport ( slurmdb_tres_rec_t, diff --git a/pyslurm/core/db/tres.pyx b/pyslurm/db/tres.pyx similarity index 98% rename from pyslurm/core/db/tres.pyx 
rename to pyslurm/db/tres.pyx index 1e77994b..f4e84130 100644 --- a/pyslurm/core/db/tres.pyx +++ b/pyslurm/db/tres.pyx @@ -22,7 +22,7 @@ # cython: c_string_type=unicode, c_string_encoding=default # cython: language_level=3 -from pyslurm.core.common.uint import * +from pyslurm.utils.uint import * cdef class TrackableResources(dict): diff --git a/pyslurm/core/db/util.pxd b/pyslurm/db/util.pxd similarity index 97% rename from pyslurm/core/db/util.pxd rename to pyslurm/db/util.pxd index deb71ed4..2e9498a6 100644 --- a/pyslurm/core/db/util.pxd +++ b/pyslurm/db/util.pxd @@ -21,7 +21,7 @@ # cython: language_level=3 from pyslurm cimport slurm -from pyslurm.core.common cimport cstr +from pyslurm.utils cimport cstr from pyslurm.slurm cimport ( ListIterator, List, diff --git a/pyslurm/core/db/util.pyx b/pyslurm/db/util.pyx similarity index 100% rename from pyslurm/core/db/util.pyx rename to pyslurm/db/util.pyx diff --git a/pyslurm/utils/__init__.pxd b/pyslurm/utils/__init__.pxd new file mode 100644 index 00000000..7a22bfae --- /dev/null +++ b/pyslurm/utils/__init__.pxd @@ -0,0 +1,2 @@ +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 diff --git a/pyslurm/utils/__init__.py b/pyslurm/utils/__init__.py new file mode 100644 index 00000000..eae6e6ed --- /dev/null +++ b/pyslurm/utils/__init__.py @@ -0,0 +1,44 @@ +######################################################################### +# utils.py - pyslurm utility functions +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+ +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +"""pyslurm utility functions""" + +# Utility time functions +from .ctime import ( + timestr_to_secs, + timestr_to_mins, + secs_to_timestr, + mins_to_timestr, + date_to_timestamp, + timestamp_to_date, +) + +# General utility functions +from .helpers import ( + uid_to_name, + gid_to_name, + user_to_uid, + group_to_gid, + expand_range_str, + humanize, + dehumanize, + nodelist_from_range_str, + nodelist_to_range_str, +) diff --git a/pyslurm/core/common/cstr.pxd b/pyslurm/utils/cstr.pxd similarity index 100% rename from pyslurm/core/common/cstr.pxd rename to pyslurm/utils/cstr.pxd diff --git a/pyslurm/core/common/cstr.pyx b/pyslurm/utils/cstr.pyx similarity index 100% rename from pyslurm/core/common/cstr.pyx rename to pyslurm/utils/cstr.pyx diff --git a/pyslurm/core/common/ctime.pxd b/pyslurm/utils/ctime.pxd similarity index 97% rename from pyslurm/core/common/ctime.pxd rename to pyslurm/utils/ctime.pxd index d8abb12d..b9bde543 100644 --- a/pyslurm/core/common/ctime.pxd +++ b/pyslurm/utils/ctime.pxd @@ -23,7 +23,7 @@ # cython: language_level=3 from pyslurm cimport slurm -from pyslurm.core.common cimport cstr +from pyslurm.utils cimport cstr from libc.stdint cimport uint32_t cdef extern from 'time.h' nogil: diff --git a/pyslurm/core/common/ctime.pyx b/pyslurm/utils/ctime.pyx similarity index 95% rename from pyslurm/core/common/ctime.pyx rename to pyslurm/utils/ctime.pyx index fdf68834..5ffbc424 100644 --- a/pyslurm/core/common/ctime.pyx +++ b/pyslurm/utils/ctime.pyx @@ -33,7 +33,7 @@ def timestr_to_secs(timestr): A Timestring 
compatible with Slurms time functions. Returns: - int: Amount of time in seconds + (int): Amount of time in seconds """ cdef: char *tmp = NULL @@ -64,7 +64,7 @@ def timestr_to_mins(timestr): A Timestring compatible with Slurms time functions. Returns: - int: Amount of time in minutes + (int): Amount of time in minutes """ cdef: char *tmp = NULL @@ -92,7 +92,7 @@ def secs_to_timestr(secs, default=None): Amount of seconds to convert Returns: - str: A Slurm timestring + (str): A Slurm timestring """ cdef char time_line[32] @@ -122,7 +122,7 @@ def mins_to_timestr(mins, default=None): Amount of minutes to convert Returns: - str: A Slurm timestring + (str): A Slurm timestring """ cdef char time_line[32] @@ -152,7 +152,7 @@ def date_to_timestamp(date, on_nodate=0): A date to convert to a Unix timestamp. Returns: - int: A unix timestamp + (int): A unix timestamp """ cdef: time_t tmp_time @@ -185,7 +185,7 @@ def timestamp_to_date(timestamp): A Unix timestamp that should be converted. Returns: - str: A Slurm date timestring + (str): A Slurm date timestring """ cdef: char time_str[32] diff --git a/pyslurm/core/common/__init__.pxd b/pyslurm/utils/helpers.pxd similarity index 93% rename from pyslurm/core/common/__init__.pxd rename to pyslurm/utils/helpers.pxd index 7915de2f..5de4cf99 100644 --- a/pyslurm/core/common/__init__.pxd +++ b/pyslurm/utils/helpers.pxd @@ -1,5 +1,5 @@ ######################################################################### -# common/__init__.pxd - common/utility functions +# helpers.pxd - basic helper functions ######################################################################### # Copyright (C) 2023 Toni Harzendorf # @@ -25,7 +25,7 @@ from pyslurm cimport slurm from pyslurm.slurm cimport xfree, try_xmalloc, xmalloc from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t -from pyslurm.core.common cimport cstr +from pyslurm.utils cimport cstr from libc.stdlib cimport free cpdef uid_to_name(uint32_t uid, err_on_invalid=*, dict lookup=*) diff 
--git a/pyslurm/core/common/__init__.pyx b/pyslurm/utils/helpers.pyx similarity index 96% rename from pyslurm/core/common/__init__.pyx rename to pyslurm/utils/helpers.pyx index 6ad5ae47..3617112e 100644 --- a/pyslurm/core/common/__init__.pyx +++ b/pyslurm/utils/helpers.pyx @@ -1,5 +1,5 @@ ######################################################################### -# common/__init__.pyx - common/utility functions +# helpers.pyx - basic helper functions ######################################################################### # Copyright (C) 2023 Toni Harzendorf # @@ -142,7 +142,7 @@ def expand_range_str(range_str): "1,2,3-10,11,15-20" Returns: - list: List of unique values + (list): List of unique values """ ret = [] for mrange in range_str.split(","): @@ -166,7 +166,7 @@ def nodelist_from_range_str(nodelist): and ranges. Returns: - list: List of all nodenames or None on failure + (list): List of all nodenames or None on failure """ if isinstance(nodelist, list): nodelist = ",".join(nodelist) @@ -197,7 +197,7 @@ def nodelist_to_range_str(nodelist): Comma-seperated str or list with unique, unbracketed nodenames. Returns: - str: Bracketed, ranged nodelist or None on failure. + (str): Bracketed, ranged nodelist or None on failure. """ if isinstance(nodelist, list): nodelist = ",".join(nodelist) @@ -233,7 +233,7 @@ def humanize(num, decimals=1): Amount of decimals the humanized string should have. Returns: - str: Humanized number with appropriate suffix. + (str): Humanized number with appropriate suffix. """ if num is None or num == "unlimited": return num @@ -260,7 +260,7 @@ def dehumanize(humanized_str, target="M", decimals=0): Amount of decimal places the result should have. 
Default is 0 Returns: - int: Dehumanized value + (int): Dehumanized value """ if not humanized_str: return None diff --git a/pyslurm/core/common/uint.pxd b/pyslurm/utils/uint.pxd similarity index 100% rename from pyslurm/core/common/uint.pxd rename to pyslurm/utils/uint.pxd diff --git a/pyslurm/core/common/uint.pyx b/pyslurm/utils/uint.pyx similarity index 100% rename from pyslurm/core/common/uint.pyx rename to pyslurm/utils/uint.pyx diff --git a/tests/integration/test_job_steps.py b/tests/integration/test_job_steps.py index 4ad2de39..bd17a188 100644 --- a/tests/integration/test_job_steps.py +++ b/tests/integration/test_job_steps.py @@ -142,7 +142,7 @@ def test_modify(submit_job): step.modify(JobStep(time_limit="00:05:00")) assert JobStep.load(job, 0).time_limit == 5 - step.modify(time_limit="00:15:00") + step.modify(JobStep(time_limit="00:15:00")) assert JobStep.load(job, 0).time_limit == 15 diff --git a/tests/integration/test_node.py b/tests/integration/test_node.py index 3e1306da..fb6f5197 100644 --- a/tests/integration/test_node.py +++ b/tests/integration/test_node.py @@ -58,7 +58,7 @@ def test_create(): def test_modify(): node = Node(Nodes.load().as_list()[0].name) - node.modify(weight=10000) + node.modify(Node(weight=10000)) assert Node.load(node.name).weight == 10000 node.modify(Node(weight=20000)) diff --git a/tests/unit/test_common.py b/tests/unit/test_common.py index ca3f1cfd..e8bf85a2 100644 --- a/tests/unit/test_common.py +++ b/tests/unit/test_common.py @@ -24,7 +24,7 @@ import pytest import datetime from pyslurm import Job, JobSubmitDescription, Node -from pyslurm.core.common.ctime import ( +from pyslurm.utils.ctime import ( timestr_to_mins, timestr_to_secs, mins_to_timestr, @@ -32,7 +32,7 @@ date_to_timestamp, timestamp_to_date, ) -from pyslurm.core.common.uint import ( +from pyslurm.utils.uint import ( u8, u16, u32, @@ -42,7 +42,7 @@ u32_parse, u64_parse, ) -from pyslurm.core.common import ( +from pyslurm.utils.helpers import ( uid_to_name, 
gid_to_name, user_to_uid, @@ -56,7 +56,7 @@ nodelist_to_range_str, _sum_prop, ) -from pyslurm.core.common import cstr +from pyslurm.utils import cstr class TestTypes: diff --git a/tests/unit/test_db_slurm_list.py b/tests/unit/test_db_slurm_list.py index 41df371c..6d770bcf 100644 --- a/tests/unit/test_db_slurm_list.py +++ b/tests/unit/test_db_slurm_list.py @@ -22,7 +22,7 @@ import pytest import pyslurm -from pyslurm.core.db.util import SlurmList +from pyslurm.db.util import SlurmList def test_create_and_destroy_list(): From 4ec608ef67af3e370138d53809e7b83a82e8e656 Mon Sep 17 00:00:00 2001 From: wpoely86 Date: Fri, 5 May 2023 20:03:43 +0200 Subject: [PATCH 18/48] drop the doc/ directory in doc_files for building rpm (#286) --- setup.cfg | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 78d52108..ba3ad0b6 100644 --- a/setup.cfg +++ b/setup.cfg @@ -8,7 +8,6 @@ include = pyslurm, pyslurm.* release = 1 packager = Giovanni Torres doc_files = README.md - doc/ examples/ build_requires = python3-devel >= 3.6 slurm-devel >= 23.02.0 From be9395c9332a38b87115c5b5521981f9ea30539e Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Fri, 5 May 2023 22:52:15 +0200 Subject: [PATCH 19/48] some more doc fixes (#287) * Some more docstring enhancements * clean before building docs * enable autorefs and more markdown extensions --- mkdocs.yml | 9 +++++++++ pyslurm/core/job/job.pxd | 2 +- pyslurm/core/job/job.pyx | 22 +++++++++------------- pyslurm/core/job/step.pxd | 2 +- pyslurm/core/job/submission.pxd | 4 ++-- pyslurm/core/job/submission.pyx | 24 ++++++++++++++++++++++++ pyslurm/core/node.pxd | 2 +- pyslurm/core/node.pyx | 4 +++- pyslurm/db/job.pxd | 2 +- pyslurm/db/stats.pxd | 3 ++- pyslurm/db/step.pxd | 2 +- scripts/builddocs.sh | 1 + 12 files changed, 55 insertions(+), 22 deletions(-) diff --git a/mkdocs.yml b/mkdocs.yml index bd62c384..757dcd2d 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -25,6 +25,7 @@ theme: name: 
Switch to dark mode - media: "(prefers-color-scheme: dark)" scheme: slate + accent: lime toggle: icon: material/brightness-4 name: Switch to light mode @@ -35,6 +36,7 @@ theme: plugins: - search - awesome-pages + - autorefs - mike - mkdocstrings: handlers: @@ -51,6 +53,13 @@ markdown_extensions: - admonition - pymdownx.snippets: check_paths: true + - pymdownx.highlight: + anchor_linenums: true + line_spans: __span + pygments_lang_class: true + - pymdownx.inlinehilite + - pymdownx.superfences + - pymdownx.details extra: version: diff --git a/pyslurm/core/job/job.pxd b/pyslurm/core/job/job.pxd index 5e8dfd4f..d1c8ddf8 100644 --- a/pyslurm/core/job/job.pxd +++ b/pyslurm/core/job/job.pxd @@ -68,7 +68,7 @@ from pyslurm.slurm cimport ( cdef class Jobs(dict): - """A collection of Job objects. + """A collection of [pyslurm.Job][] objects. Args: jobs (Union[list, dict], optional=None): diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index 7f5beae3..bab6ca28 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -167,8 +167,9 @@ cdef class Jobs(dict): This function fills in the "steps" attribute for all Jobs in the collection. - Note: Pending Jobs will be ignored, since they don't have any Steps - yet. + !!! note + + Pending Jobs will be ignored, since they don't have any Steps yet. Raises: RPCError: When retrieving the Job information for all the Steps @@ -241,7 +242,8 @@ cdef class Job: Implements the slurm_load_job RPC. - Note: + !!! note + If the Job is not pending, the related Job steps will also be loaded. @@ -454,8 +456,6 @@ cdef class Job: release the Job again. If you specify the mode as "user", the User will also be able to release the job. - Note: Uses the modify() function to set the Job's priority to 0. - Raises: RPCError: When holding the Job was not successful. @@ -478,9 +478,6 @@ cdef class Job: def release(self): """Release a currently held Job, allowing it to be scheduled again. 
- Note: Uses the modify() function to reset the priority back to - be controlled by the slurmctld's priority calculation routine. - Raises: RPCError: When releasing a held Job was not successful. @@ -1223,14 +1220,13 @@ cdef class Job: def get_resource_layout_per_node(self): """Retrieve the resource layout of this Job on each node. - The dict returned contains the following information for each node: - * cpu_ids (str) - * gres (dict) - * memory (int) + !!! warning + + Return type may still be subject to change in the future Returns: (dict): Resource layout, where the key is the name of the name and - its value another dict with the components described above. + its value another dict with the CPU-ids, memory and gres. """ # The code for this function is a modified reimplementation from here: # https://github.com/SchedMD/slurm/blob/d525b6872a106d32916b33a8738f12510ec7cf04/src/api/job_info.c#L739 diff --git a/pyslurm/core/job/step.pxd b/pyslurm/core/job/step.pxd index ed61d4d9..087742d6 100644 --- a/pyslurm/core/job/step.pxd +++ b/pyslurm/core/job/step.pxd @@ -50,7 +50,7 @@ from pyslurm.core.job.task_dist cimport TaskDistribution cdef class JobSteps(dict): - """A collection of [`pyslurm.JobStep`][] objects for a given Job. + """A collection of [pyslurm.JobStep][] objects for a given Job. Args: job (Union[Job, int]): diff --git a/pyslurm/core/job/submission.pxd b/pyslurm/core/job/submission.pxd index f9c9d877..1547f5de 100644 --- a/pyslurm/core/job/submission.pxd +++ b/pyslurm/core/job/submission.pxd @@ -312,8 +312,8 @@ cdef class JobSubmitDescription: * "no" or "exclusive" No sharing of resources is allowed. (--exclusive from sbatch) - distribution (Union[dict, str]): - TODO + distribution (str): + Task distribution for the Job, same as --distribution from sbatch time_limit (str): The time limit for the job. This is the same as -t/--time from sbatch. 
diff --git a/pyslurm/core/job/submission.pyx b/pyslurm/core/job/submission.pyx index f023f035..50613937 100644 --- a/pyslurm/core/job/submission.pyx +++ b/pyslurm/core/job/submission.pyx @@ -105,11 +105,35 @@ cdef class JobSubmitDescription: def load_environment(self, overwrite=False): """Load values of attributes provided through the environment. + !!! note + + Instead of `SBATCH_`, pyslurm uses `PYSLURM_JOBDESC_` as a prefix to + identify environment variables which should be used to set + attributes. + Args: overwrite (bool): If set to True, the value from an option found in the environment will override the current value of the attribute in this instance. Default is False + + Examples: + Lets consider you want to set the name of the Job and its + Account name. Therefore, you will need to have set these two + environment variables: + + ```bash + export PYSLURM_JOBDESC_ACCOUNT="myaccount" + export PYSLURM_JOBDESC_NAME="myjobname" + ``` + + In python, you can do this now: + + >>> import pyslurm + >>> desc = pyslurm.JobSubmitDescription(...other args...) + >>> desc.load_environment() + >>> print(desc.name, desc.account) + myjobname, myaccount """ self._parse_env(overwrite) diff --git a/pyslurm/core/node.pxd b/pyslurm/core/node.pxd index 412a290c..9ddb7000 100644 --- a/pyslurm/core/node.pxd +++ b/pyslurm/core/node.pxd @@ -58,7 +58,7 @@ from pyslurm.utils.uint cimport * cdef class Nodes(dict): - """A collection of Node objects. + """A collection of [pyslurm.Node][] objects. Args: nodes (Union[list, dict, str], optional=None): diff --git a/pyslurm/core/node.pyx b/pyslurm/core/node.pyx index 050f6262..f869b2ab 100644 --- a/pyslurm/core/node.pyx +++ b/pyslurm/core/node.pyx @@ -131,7 +131,9 @@ cdef class Nodes(dict): def reload(self): """Reload the information for nodes in a collection. - Note: Only information for nodes which are already in the collection at + !!! 
note + + Only information for nodes which are already in the collection at the time of calling this method will be reloaded. Raises: diff --git a/pyslurm/db/job.pxd b/pyslurm/db/job.pxd index 08673682..28ba5423 100644 --- a/pyslurm/db/job.pxd +++ b/pyslurm/db/job.pxd @@ -149,7 +149,7 @@ cdef class JobSearchFilter: cdef class Jobs(dict): - """A collection of [`pyslurm.db.Job`][] objects.""" + """A collection of [pyslurm.db.Job][] objects.""" cdef: SlurmList info Connection db_conn diff --git a/pyslurm/db/stats.pxd b/pyslurm/db/stats.pxd index 682143e6..1ca9c701 100644 --- a/pyslurm/db/stats.pxd +++ b/pyslurm/db/stats.pxd @@ -37,7 +37,8 @@ from pyslurm.utils cimport cstr cdef class JobStatistics: """Statistics for a Slurm Job or Step. - Note: + !!! note + For more information also see the sacct manpage. Attributes: diff --git a/pyslurm/db/step.pxd b/pyslurm/db/step.pxd index d13e3da7..aef7120b 100644 --- a/pyslurm/db/step.pxd +++ b/pyslurm/db/step.pxd @@ -44,7 +44,7 @@ from pyslurm.db.tres cimport TrackableResources, TrackableResource cdef class JobSteps(dict): - """A collection of [`pyslurm.db.JobStep`][] objects""" + """A collection of [pyslurm.db.JobStep][] objects""" pass diff --git a/scripts/builddocs.sh b/scripts/builddocs.sh index cc5625c2..b56482f1 100755 --- a/scripts/builddocs.sh +++ b/scripts/builddocs.sh @@ -1,5 +1,6 @@ #!/bin/bash +python setup.py clean pip install -r doc_requirements.txt pip install --no-build-isolation -e . 
mkdocs build From 788f445f7fa528f2c5533270a1865c67b9d4e25b Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Sat, 6 May 2023 20:09:58 +0200 Subject: [PATCH 20/48] just use the copyright notice again that was also previously used (#288) "PySlurm Developers" also just sounds a bit nicer actually --- mkdocs.yml | 2 +- pyslurm/slurm/slurm.h.pxi | 11 +++++++---- pyslurm/slurm/slurm_errno.h.pxi | 11 +++++++---- pyslurm/slurm/slurm_version.h.pxi | 2 +- pyslurm/slurm/slurmdb.h.pxi | 11 +++++++---- scripts/pyslurm_bindgen.py | 9 ++++++--- 6 files changed, 29 insertions(+), 17 deletions(-) diff --git a/mkdocs.yml b/mkdocs.yml index 757dcd2d..c0379d74 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -3,7 +3,7 @@ site_name: "pyslurm" site_url: "https://pyslurm.github.io" repo_url: "https://github.com/PySlurm/pyslurm" repo_name: "PySlurm/pyslurm" -copyright: Copyright © 2023 The PySlurm Authors +copyright: Copyright © 2023 PySlurm Developers theme: name: "material" diff --git a/pyslurm/slurm/slurm.h.pxi b/pyslurm/slurm/slurm.h.pxi index 440a31cb..3605e5a7 100644 --- a/pyslurm/slurm/slurm.h.pxi +++ b/pyslurm/slurm/slurm.h.pxi @@ -9,7 +9,7 @@ # * C-Macros are listed with their appropriate uint type # * Any definitions that cannot be translated are not included in this file # -# Generated on 2023-04-30T11:54:32.116465 +# Generated on 2023-05-06T18:02:46.408139 # # The Original Copyright notice from slurm.h has been included # below: @@ -26,11 +26,14 @@ # Written by Morris Jette , et. al. # CODE-OCEC-09-009. All rights reserved. # -# Please also check the DISCLAIMER file in the Slurm repository here: -# https://github.com/SchedMD/slurm/blob/master/DISCLAIMER +# Slurm is licensed under the GNU GPLv2. 
For the full text of Slurm's License, +# please see here: pyslurm/slurm/SLURM_LICENSE +# +# Please, as mentioned above, also have a look at Slurm's DISCLAIMER under +# pyslurm/slurm/SLURM_DISCLAIMER ############################################################################## # -# Copyright (C) 2023 The PySlurm Authors (Modifications as described above) +# Copyright (C) 2023 PySlurm Developers (Modifications as described above) # # This file is part of PySlurm # diff --git a/pyslurm/slurm/slurm_errno.h.pxi b/pyslurm/slurm/slurm_errno.h.pxi index c1c6517f..3ed2d122 100644 --- a/pyslurm/slurm/slurm_errno.h.pxi +++ b/pyslurm/slurm/slurm_errno.h.pxi @@ -9,7 +9,7 @@ # * C-Macros are listed with their appropriate uint type # * Any definitions that cannot be translated are not included in this file # -# Generated on 2023-04-30T11:54:32.011184 +# Generated on 2023-05-06T18:02:46.304407 # # The Original Copyright notice from slurm_errno.h has been included # below: @@ -24,11 +24,14 @@ # Jim Garlick , et. al. # CODE-OCEC-09-009. All rights reserved. # -# Please also check the DISCLAIMER file in the Slurm repository here: -# https://github.com/SchedMD/slurm/blob/master/DISCLAIMER +# Slurm is licensed under the GNU GPLv2. 
For the full text of Slurm's License, +# please see here: pyslurm/slurm/SLURM_LICENSE +# +# Please, as mentioned above, also have a look at Slurm's DISCLAIMER under +# pyslurm/slurm/SLURM_DISCLAIMER ############################################################################## # -# Copyright (C) 2023 The PySlurm Authors (Modifications as described above) +# Copyright (C) 2023 PySlurm Developers (Modifications as described above) # # This file is part of PySlurm # diff --git a/pyslurm/slurm/slurm_version.h.pxi b/pyslurm/slurm/slurm_version.h.pxi index a7710f93..f9c4f5c6 100644 --- a/pyslurm/slurm/slurm_version.h.pxi +++ b/pyslurm/slurm/slurm_version.h.pxi @@ -1,4 +1,4 @@ -# Copyright (C) 2023 The PySlurm Authors +# Copyright (C) 2023 PySlurm Developers # # This file is part of PySlurm # diff --git a/pyslurm/slurm/slurmdb.h.pxi b/pyslurm/slurm/slurmdb.h.pxi index 00b22f36..d4c16e4e 100644 --- a/pyslurm/slurm/slurmdb.h.pxi +++ b/pyslurm/slurm/slurmdb.h.pxi @@ -9,7 +9,7 @@ # * C-Macros are listed with their appropriate uint type # * Any definitions that cannot be translated are not included in this file # -# Generated on 2023-04-30T11:54:32.267784 +# Generated on 2023-05-06T18:02:46.554956 # # The Original Copyright notice from slurmdb.h has been included # below: @@ -22,11 +22,14 @@ # Written by Danny Auble da@llnl.gov, et. al. # CODE-OCEC-09-009. All rights reserved. # -# Please also check the DISCLAIMER file in the Slurm repository here: -# https://github.com/SchedMD/slurm/blob/master/DISCLAIMER +# Slurm is licensed under the GNU GPLv2. 
For the full text of Slurm's License, +# please see here: pyslurm/slurm/SLURM_LICENSE +# +# Please, as mentioned above, also have a look at Slurm's DISCLAIMER under +# pyslurm/slurm/SLURM_DISCLAIMER ############################################################################## # -# Copyright (C) 2023 The PySlurm Authors (Modifications as described above) +# Copyright (C) 2023 PySlurm Developers (Modifications as described above) # # This file is part of PySlurm # diff --git a/scripts/pyslurm_bindgen.py b/scripts/pyslurm_bindgen.py index c4bbd18b..82eb157c 100755 --- a/scripts/pyslurm_bindgen.py +++ b/scripts/pyslurm_bindgen.py @@ -133,13 +133,16 @@ def translate_slurm_header(hdr_dir, hdr): # below: # {copyright_notice}# -# Please also check the DISCLAIMER file in the Slurm repository here: -# https://github.com/SchedMD/slurm/blob/master/DISCLAIMER +# Slurm is licensed under the GNU GPLv2. For the full text of Slurm's License, +# please see here: pyslurm/slurm/SLURM_LICENSE +# +# Please, as mentioned above, also have a look at Slurm's DISCLAIMER under +# pyslurm/slurm/SLURM_DISCLAIMER ############################################################################## """ pyslurm_copyright = """# -# Copyright (C) 2023 The PySlurm Authors (Modifications as described above) +# Copyright (C) 2023 PySlurm Developers (Modifications as described above) # # This file is part of PySlurm # From 42471d8575e89caa64fea55677d1af130328b4a7 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Fri, 12 May 2023 22:11:04 +0200 Subject: [PATCH 21/48] some more improvements (#291) * set light/dark mode primary colors explicitly * Reuse logic to get the batch script of a job from the new Job API * improve job submit testing and fix bugs * JobSubmitDescription: fix some docstrings * change quality_of_service to just qos in pyslurm/db/job * fix date parse tests? 
--- mkdocs.yml | 2 + pyslurm/core/job/submission.pxd | 129 ++++---- pyslurm/core/job/submission.pyx | 441 ++++++++++++++------------- pyslurm/core/job/util.pyx | 125 ++++---- pyslurm/db/job.pxd | 4 +- pyslurm/db/job.pyx | 6 +- pyslurm/pyslurm.pyx | 48 +-- pyslurm/utils/cstr.pyx | 6 +- tests/integration/test_job_submit.py | 28 +- tests/unit/test_common.py | 26 +- tests/unit/test_job_submit.py | 313 +++++++++++-------- 11 files changed, 599 insertions(+), 529 deletions(-) diff --git a/mkdocs.yml b/mkdocs.yml index c0379d74..1341b839 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -20,12 +20,14 @@ theme: - media: "(prefers-color-scheme: light)" scheme: default accent: purple + primary: indigo toggle: icon: material/brightness-7 name: Switch to dark mode - media: "(prefers-color-scheme: dark)" scheme: slate accent: lime + primary: black toggle: icon: material/brightness-4 name: Switch to light mode diff --git a/pyslurm/core/job/submission.pxd b/pyslurm/core/job/submission.pxd index 1547f5de..fdedc8ed 100644 --- a/pyslurm/core/job/submission.pxd +++ b/pyslurm/core/job/submission.pxd @@ -108,8 +108,7 @@ cdef class JobSubmitDescription: cpu_frequency (Union[dict, str]): CPU Frequency for the Job, same as --cpu-freq from sbatch. - Examples: - Specifying it as a dict: + For example, specifying it as a dict: cpu_frequency = { "min": "Low", @@ -117,17 +116,17 @@ cdef class JobSubmitDescription: "governor": "UserSpace" } - or like in sbatch with a string. For more info on that, check - out the sbatch documentation for --cpu-freq. + or like in sbatch with a string. For more info on that, check + out the sbatch documentation for --cpu-freq. 
- If you only want to set a Governor without any min or max, you - can simply specify it as a standalone string: + If you only want to set a Governor without any min or max, you + can simply specify it as a standalone string: cpu_frequency = "Performance" or cpu_frequency = {"governor": "Performance"} - If you want to set a specific, fixed frequency, you can do: + If you want to set a specific, fixed frequency, you can do: cpu_frequency = or either @@ -136,21 +135,19 @@ cdef class JobSubmitDescription: Amount of nodes needed for the job. This is the same as -N/--nodes from sbatch. - Examples: - Providing min/max nodes as a dict: + For example, providing min/max nodes as a dict: nodes = { "min": 3, "max": 6 } - When no range is needed, you can also simply specify it as - int: + When no range is needed, you can also simply specify it as int: nodes = 3 - Other than that, a range can also be specified in a str like - with sbatch: + Other than that, a range can also be specified in a str like with + sbatch: nodes = "1-5" deadline (str): @@ -193,7 +190,9 @@ cdef class JobSubmitDescription: This is the same as --mem-per-cpu from sbatch. This is mutually exclusive with memory_per_node and memory_per_gpu. + Examples: + # 1 MiB memory_per_cpu = 1024 @@ -207,7 +206,9 @@ cdef class JobSubmitDescription: This is the same as --mem from sbatch. This is mutually exclusive with memory_per_cpu and memory_per_gpu. + Examples: + # 1 MiB memory_per_node = 1024 @@ -221,7 +222,9 @@ cdef class JobSubmitDescription: This is the same as --mem-per-gpu from sbatch. This is mutually exclusive with memory_per_node and memory_per_cpu. + Examples: + # 1 MiB memory_per_gpu = 1024 @@ -235,12 +238,13 @@ cdef class JobSubmitDescription: This is the same as --nice from sbatch. log_files_open_mode (str): Mode in which standard_output and standard_error log files should be opened. + This is the same as --open-mode from sbatch. 
+ Valid options are: - * append - * truncate - This is the same as --open-mode from sbatch. + * `append` + * `truncate` overcommit (bool): If the resources should be overcommitted. This is the same as -O/--overcommit from sbatch. @@ -254,15 +258,15 @@ cdef class JobSubmitDescription: Interval for accounting info to be gathered. This is the same as --acctg-freq from sbatch. - Examples: - Specifying it as a dict: + + For example, specifying it as a dict: accounting_gather_frequency = { - energy=60, - network=20, + "energy"=60, + "network"=20, } - or as a single string: + or as a single string: accounting_gather_frequency = "energy=60,network=20" qos (str): @@ -283,34 +287,38 @@ cdef class JobSubmitDescription: You can specify either a path to a script which will be loaded, or you can pass the script as a string. If the script is passed as a string, providing arguments to it - (see "script_args") is not supported. + (see `script_args`) is not supported. script_args (str): Arguments passed to the batch script. You can only set arguments if a file path was specified for - "script". + `script`. environment (Union[dict, str]): Environment variables to be set for the Job. This is the same as --export from sbatch. resource_sharing (str): Controls the resource sharing with other Jobs. - This property combines functionality of --oversubscribe and --exclusive from sbatch. + Allowed values are are: - * "oversubscribe" or "yes": + * `oversubscribe` or `yes`: + The Job allows resources to be shared with other running Jobs. - * "user" + * `user`: + Only sharing resources with other Jobs that have the "user" option set is allowed - * "mcs" + * `mcs`: + Only sharing resources with other Jobs that have the "mcs" option set is allowed. - * "no" or "exclusive" + * `no` or `exclusive`: + No sharing of resources is allowed. 
(--exclusive from sbatch) distribution (str): Task distribution for the Job, same as --distribution from sbatch @@ -327,12 +335,12 @@ cdef class JobSubmitDescription: The amount of cpus required for each task. This is the same as -c/--cpus-per-task from sbatch. - This is mutually exclusive with cpus_per_gpu. + This is mutually exclusive with `cpus_per_gpu`. cpus_per_gpu (int): The amount of cpus required for each allocated GPU. This is the same as --cpus-per-gpu from sbatch. - This is mutually exclusive with cpus_per_task. + This is mutually exclusive with `cpus_per_task`. sockets_per_node (int): Restrict Job to nodes with atleast this many sockets. This is the same as --sockets-per-node from sbatch. @@ -344,23 +352,22 @@ cdef class JobSubmitDescription: This is the same as --threads-per-core from sbatch. gpus (Union[dict, str, int]): GPUs for the Job to be allocated in total. - This is the same as -G/--gpus from sbatch. Specifying the type of the GPU is optional. - Examples: - Specifying the GPU counts as a dict: + + For example, specifying the GPU counts as a dict: gpus = { "tesla": 1, "volta": 5, } - Or, for example, in string format: + Or, for example, in string format: gpus = "tesla:1,volta:5" - Or, if you don't care about the type of the GPU: + Or, if you don't care about the type of the GPU: gpus = 6 gpus_per_socket (Union[dict, str, int]): @@ -369,21 +376,21 @@ cdef class JobSubmitDescription: This is the same as --gpus-per-socket from sbatch. Specifying the type of the GPU is optional. Note that setting - gpus_per_socket requires to also specify sockets_per_node. + `gpus_per_socket` requires to also specify sockets_per_node. 
- Examples: - Specifying it as a dict: + + For example, specifying it as a dict: gpus_per_socket = { "tesla": 1, "volta": 5, } - Or, for example, in string format: + Or, for example, in string format: gpus_per_socket = "tesla:1,volta:5" - Or, if you don't care about the type of the GPU: + Or, if you don't care about the type of the GPU: gpus_per_socket = 6 gpus_per_task (Union[dict, str, int]): @@ -392,22 +399,21 @@ cdef class JobSubmitDescription: This is the same as --gpus-per-task from sbatch. Specifying the type of the GPU is optional. Note that setting - "gpus_per_task" requires to also specify either one of "ntasks" or - "gpus". + `gpus_per_task` requires to also specify either one of `ntasks` or + `gpus`. - Examples: - Specifying it as a dict: + For example, specifying it as a dict: gpus_per_task = { "tesla": 1, "volta": 5, } - Or, for example, in string format: + Or, for example, in string format: gpus_per_task = "tesla:1,volta:5" - Or, if you don't care about the type of the GPU: + Or, if you don't care about the type of the GPU: gpus_per_task = 6 gres_per_node (Union[dict, str]): @@ -418,19 +424,18 @@ cdef class JobSubmitDescription: Specifying the type (by seperating GRES name and type with a semicolon) is optional. - Examples: - Specifying it as a dict: + For example, specifying it as a dict: gres_per_node = { "gpu:tesla": 1, "gpu:volta": 5, } - Or, for example, in string format: + Or, for example, in string format: gres_per_node = "gpu:tesla:1,gpu:volta:5" - GPU Gres without a specific type: + GPU Gres without a specific type: gres_per_node = "gpu:6" gpu_binding (str): @@ -458,12 +463,12 @@ cdef class JobSubmitDescription: switches. This is the same as --switches from sbatch. 
- Examples: - Specifying it as a dict: + + For example, specifying it as a dict: switches = { "count": 5, "max_wait_time": "00:10:00" } - Or as a single string (sbatch-style): + Or as a single string (sbatch-style): switches = "5@00:10:00" signal (Union[dict, str]): @@ -473,18 +478,18 @@ cdef class JobSubmitDescription: The signal can both be specified with its name, e.g. "SIGKILL", or as a number, e.g. 9 - Examples: - Specifying it as a dict: + + For example, specifying it as a dict: signal = { "signal": "SIGKILL", "time": 120 } - The above will send a "SIGKILL" signal 120 seconds before the - Jobs' time limit is reached. + The above will send a "SIGKILL" signal 120 seconds before the + Jobs' time limit is reached. - Or, specifying it as a string (sbatch-style): + Or, specifying it as a string (sbatch-style): signal = "SIGKILL@120" standard_in (str): @@ -509,9 +514,11 @@ cdef class JobSubmitDescription: Generic resource task binding options. This is the --gres-flags option from sbatch. + Possible values are: - * "enforce-binding" - * "disable-binding" + + * `enforce-binding` + * `disable-binding` temporary_disk_per_node (Union[str, int]): Amount of temporary disk space needed per node. @@ -519,7 +526,9 @@ cdef class JobSubmitDescription: K|M|G|T (multiples of 1024). If no unit is specified, the value will be assumed as Mebibytes. + Examples: + # 2048 MiB tmp_disk_per_node = "2G" diff --git a/pyslurm/core/job/submission.pyx b/pyslurm/core/job/submission.pyx index 50613937..bf47105b 100644 --- a/pyslurm/core/job/submission.pyx +++ b/pyslurm/core/job/submission.pyx @@ -107,9 +107,9 @@ cdef class JobSubmitDescription: !!! note - Instead of `SBATCH_`, pyslurm uses `PYSLURM_JOBDESC_` as a prefix to - identify environment variables which should be used to set - attributes. + Instead of `SBATCH_`, pyslurm uses `PYSLURM_JOBDESC_` as a prefix + to identify environment variables which should be used to set + attributes. 
Args: overwrite (bool): @@ -118,22 +118,26 @@ cdef class JobSubmitDescription: in this instance. Default is False Examples: - Lets consider you want to set the name of the Job and its - Account name. Therefore, you will need to have set these two - environment variables: + Lets consider you want to set the name of the Job, its Account + name and that the Job cannot be requeued. + Therefore, you will need to have set these environment variables: ```bash + # Format is: PYSLURM_JOBDESC_{ATTRIBUTE_NAME} export PYSLURM_JOBDESC_ACCOUNT="myaccount" export PYSLURM_JOBDESC_NAME="myjobname" + export PYSLURM_JOBDESC_IS_REQUEUEABLE="False" ``` + As you can see above, boolean values should be the literal strings + "False" or "True". In python, you can do this now: >>> import pyslurm >>> desc = pyslurm.JobSubmitDescription(...other args...) >>> desc.load_environment() - >>> print(desc.name, desc.account) - myjobname, myaccount + >>> print(desc.name, desc.account, desc.is_requeueable) + myjobname, myaccount, False """ self._parse_env(overwrite) @@ -219,7 +223,8 @@ cdef class JobSubmitDescription: cstr.fmalloc(&ptr.cpus_per_tres, cstr.from_gres_dict(self.cpus_per_gpu, "gpu")) cstr.fmalloc(&ptr.admin_comment, self.admin_comment) - + cstr.fmalloc(&self.ptr.dependency, + _parse_dependencies(self.dependencies)) cstr.from_list(&ptr.clusters, self.clusters) cstr.from_list(&ptr.exc_nodes, self.excluded_nodes) cstr.from_list(&ptr.req_nodes, self.required_nodes) @@ -227,13 +232,11 @@ cdef class JobSubmitDescription: cstr.from_list(&ptr.partition, self.partitions) cstr.from_list(&ptr.reservation, self.reservations) cstr.from_dict(&ptr.acctg_freq, self.accounting_gather_frequency) - ptr.deadline = date_to_timestamp(self.deadline) ptr.begin_time = date_to_timestamp(self.begin_time) ptr.delay_boot = timestr_to_secs(self.delay_boot_time) ptr.time_limit = timestr_to_mins(self.time_limit) ptr.time_min = timestr_to_mins(self.time_limit_min) - ptr.priority = u32(self.priority, zero_is_noval=False) 
ptr.num_tasks = u32(self.ntasks) ptr.pn_min_tmp_disk = u32(dehumanize(self.temporary_disk_per_node)) @@ -257,21 +260,22 @@ cdef class JobSubmitDescription: ptr.reboot = u16_bool(self.requires_node_reboot) ptr.requeue = u16_bool(self.is_requeueable) ptr.wait_all_nodes = u16_bool(self.wait_all_nodes) - ptr.mail_type = mail_type_list_to_int(self.mail_types) ptr.power_flags = power_type_list_to_int(self.power_options) ptr.profile = acctg_profile_list_to_int(self.profile_types) ptr.shared = shared_type_str_to_int(self.resource_sharing) - self._set_cpu_frequency() - self._set_nodes() - self._set_dependencies() + if not self.is_update: + self.ptr.min_nodes, self.ptr.max_nodes = _parse_nodes(self.nodes) + cstr.fmalloc(&self.ptr.script, + _validate_batch_script(self.script, self.script_args)) + self._set_script_args() + self._set_environment() + self._set_distribution() + self._set_memory() self._set_open_mode() - self._set_script() - self._set_script_args() - self._set_environment() - self._set_distribution() + self._set_cpu_frequency() self._set_gpu_binding() self._set_gres_binding() self._set_min_cpus() @@ -330,129 +334,19 @@ cdef class JobSubmitDescription: self.ptr.core_spec |= slurm.CORE_SPEC_THREAD def _set_cpu_frequency(self): - if not self.cpu_frequency: - return None - freq = self.cpu_frequency - have_no_range = False + if not freq: + return None # Alternatively support sbatch-like --cpu-freq setting. if not isinstance(freq, dict): - freq_splitted = re.split("[-:]+", str(freq)) - freq_len = len(freq_splitted) - freq = {} - - # Transform cpu-freq string to the individual components. - if freq_splitted[0].isdigit(): - freq["max"] = freq_splitted[0] - else: - if freq_len > 1: - raise ValueError( - "Invalid cpu_frequency format: {kwargs}." - "Governor must be provided as single element or " - "as last element in the form of min-max:governor. 
" - ) - freq["governor"] = freq_splitted[0] - - if freq_len >= 2: - freq["min"] = freq["max"] - freq["max"] = freq_splitted[1] - - if freq_len == 3: - freq["governor"] = freq_splitted[2] - - freq_min = cpu_freq_str_to_int(freq.get("min")) - freq_max = cpu_freq_str_to_int(freq.get("max")) - freq_gov = cpu_gov_str_to_int(freq.get("governor")) - - if freq_min != u32(None): - if freq_max == u32(None): - freq_max = freq_min - freq_min = u32(None) - have_no_range = True - elif freq_max < freq_min: - raise ValueError( - f"min cpu-freq ({freq_min}) must be smaller " - f"than max cpu-freq ({freq_max})" - ) - elif freq_max != u32(None) and freq_min == u32(None): - have_no_range = True - - if have_no_range and freq_gov != u32(None): - raise ValueError( - "Setting Governor when specifying only either one " - "of min or max is not allowed." - ) + freq = _parse_cpu_freq_str_to_dict(freq) + freq_min, freq_max, freq_gov = _validate_cpu_freq(freq) self.ptr.cpu_freq_min = freq_min self.ptr.cpu_freq_max = freq_max self.ptr.cpu_freq_gov = freq_gov - def _set_nodes(self): - vals = self.nodes - nmin=nmax = 1 - - if self.is_update: - return None - - # Support input like --nodes from sbatch (min-[max]) - if isinstance(vals, dict): - nmin = u32(vals.get("min", 1), on_noval=1) - nmax = u32(vals.get("max", 1), on_noval=nmin) - elif vals is not None: - v = str(vals).split("-", 1) - nmin = int(v[0]) - if nmin == 0: - nmin = 1 - if "-" in str(vals): - nmax = int(v[1]) - else: - nmax = nmin - - if not nmax: - nmax = nmin - if nmax < nmin: - raise ValueError("Max Nodecount cannot be " - "less than minimum nodecount.") - - self.ptr.min_nodes = nmin - self.ptr.max_nodes = nmax - - def _set_dependencies(self): - val = self.dependencies - final = None - - if isinstance(val, str): - # TODO: Even though everything is checked in the slurmctld, maybe - # still do some sanity checks here on the input when a string - # is provided. 
- final = val - elif val is not None: - satisfy = val.pop("satisfy", "all").casefold() - - if satisfy == "any": - delim = "?" - else: - delim = "," - - final = [] - for k, v in val.items(): - if k == "singleton" and bool(v): - final.append("singleton") - continue - - if not isinstance(v, list): - raise TypeError(f"Values for {k} must be list, " - f"got {type(v)}.") - # Convert everything to strings and add it to the dependency - # list. - v[:] = [str(s) for s in v] - final.append(f"{k}:{':'.join(v)}") - - final = delim.join(final) - - cstr.fmalloc(&self.ptr.dependency, final) - def _set_memory(self): if self.memory_per_cpu: self.ptr.pn_min_memory = u64(dehumanize(self.memory_per_cpu)) @@ -470,45 +364,6 @@ cdef class JobSubmitDescription: elif val == "truncate": self.ptr.open_mode = slurm.OPEN_MODE_TRUNCATE - def _set_script(self): - sfile = self.script - sbody = None - - if self.is_update: - return None - - if Path(sfile).is_file(): - # First assume the caller is passing a path to a script and we try - # to load it. - sbody = Path(sfile).read_text() - else: - # Otherwise assume that the script content is passed directly. - sbody = sfile - if self.script_args: - raise ValueError("Passing arguments to a script is only allowed " - "if it was loaded from a file.") - - # Validate the script - if not sbody or not len(sbody): - raise ValueError("Batch script is empty or none was provided.") - elif sbody.isspace(): - raise ValueError("Batch script contains only whitespace.") - elif not sbody.startswith("#!"): - msg = "Not a valid Batch script. " - msg += "First line must start with '#!'," - msg += "followed by the path to an interpreter" - raise ValueError(msg) - elif "\0" in sbody: - msg = "The Slurm Controller does not allow scripts that " - msg += "contain a NULL character: '\\0'." - raise ValueError(msg) - elif "\r\n" in sbody: - msg = "Batch script contains DOS line breaks (\\r\\n) " - msg += "instead of expected UNIX line breaks (\\n)." 
- raise ValueError(msg) - - cstr.fmalloc(&self.ptr.script, sbody) - def _set_script_args(self): args = self.script_args if not args: @@ -532,9 +387,6 @@ cdef class JobSubmitDescription: cstr.fmalloc(&self.ptr.argv[idx], opt) def _set_environment(self): - if self.is_update: - return None - vals = self.environment get_user_env = self.get_user_environment @@ -647,55 +499,32 @@ cdef class JobSubmitDescription: self.ptr.min_cpus = self.ptr.cpus_per_task * self.ptr.num_tasks def _set_switches(self): - kwargs = self.switches - if isinstance(kwargs, dict): - self.ptr.req_switch = u32(kwargs.get("count")) - self.ptr.wait4switch = timestr_to_secs(kwargs.get("max_wait_time")) - elif kwargs is not None: - vals = str(kwargs.split("@")) - if len(vals) > 1: - self.ptr.wait4switch = timestr_to_secs(vals[1]) - self.ptr.req_switch = u32(vals[0]) - - def _set_signal(self): - vals = self.signal + vals = self.switches if not vals: return None - info = vals - # This supports input like the --signal option from sbatch - if vals and not isinstance(vals, dict): - info = {} - val_list = re.split("[:@]+", str(vals)) - - if len(val_list): - if ":" in str(vals): - flags = val_list.pop(0).casefold() - - if "r" in flags: - info["allow_reservation_overlap"] = True + if not isinstance(vals, dict): + vals = _parse_switches_str_to_dict(vals) - if "b" in flags: - info["batch_only"] = True + self.ptr.req_switch = u32(kwargs.get("count")) + self.ptr.wait4switch = timestr_to_secs(kwargs.get("max_wait_time")) - if "@" in str(vals): - info["time"] = val_list[1] - - info["signal"] = val_list[0] + def _set_signal(self): + vals = self.signal + if not vals: + return None - # Parse values first to catch bad input - w_signal = u16(signal_to_num(info.get("signal"))) - w_time = u16(info.get("time"), on_noval=60) - batch_only = bool(info.get("batch_only")) - allow_resv_overlap = bool(info.get("allow_reservation_overlap")) + if not isinstance(vals, dict): + vals = _parse_signal_str_to_dict(vals) - # Then set it. 
At this point we can be sure that the input is correct. - self.ptr.warn_signal = w_signal - self.ptr.warn_time = w_time + self.ptr.warn_signal = u16(signal_to_num(vals.get("signal"))) + self.ptr.warn_time = u16(vals.get("time"), on_noval=60) u16_set_bool_flag(&self.ptr.warn_flags, - batch_only, slurm.KILL_JOB_BATCH) - u16_set_bool_flag(&self.ptr.warn_flags, - allow_resv_overlap, slurm.KILL_JOB_RESV) + bool(vals.get("batch_only")), slurm.KILL_JOB_BATCH) + u16_set_bool_flag( + &self.ptr.warn_flags, + bool(vals.get("allow_reservation_overlap")), + slurm.KILL_JOB_RESV) def _set_gres_binding(self): if not self.gres_binding: @@ -704,3 +533,183 @@ cdef class JobSubmitDescription: self.ptr.bitflags |= slurm.GRES_ENFORCE_BIND elif self.gres_binding.casefold() == "disable-binding": self.ptr.bitflags |= slurm.GRES_DISABLE_BIND + + +def _parse_dependencies(val): + final = None + + if isinstance(val, str): + # TODO: Even though everything is checked in the slurmctld, maybe + # still do some sanity checks here on the input when a string + # is provided. + final = val + elif val is not None: + satisfy = val.pop("satisfy", "all").casefold() + + if satisfy == "any": + delim = "?" 
+ else: + delim = "," + + final = [] + for condition, vals in val.items(): + if condition == "singleton" and bool(vals): + final.append("singleton") + continue + + if not isinstance(vals, list): + vals = str(vals).split(",") + + vals = [str(s) for s in vals] + final.append(f"{condition}:{':'.join(vals)}") + + final = delim.join(final) + + return final + + +def _parse_nodes(vals): + nmin=nmax = 1 + + # Support input like --nodes from sbatch (min-[max]) + if isinstance(vals, dict): + nmin = u32(vals.get("min", 1), on_noval=1) + nmax = u32(vals.get("max", 1), on_noval=nmin) + elif vals is not None: + v = str(vals).split("-", 1) + nmin = int(v[0]) + if nmin == 0: + nmin = 1 + if "-" in str(vals): + nmax = int(v[1]) + else: + nmax = nmin + + if not nmax: + nmax = nmin + if nmax < nmin: + raise ValueError("Max Nodecount cannot be less than minimum" + " nodecount.") + + return nmin, nmax + + +def _parse_signal_str_to_dict(vals): + info = {} + # This supports input like the --signal option from sbatch + val_list = re.split("[:@]+", str(vals)) + + if len(val_list): + if ":" in str(vals): + flags = val_list.pop(0).casefold() + + if "r" in flags: + info["allow_reservation_overlap"] = True + + if "b" in flags: + info["batch_only"] = True + + if "@" in str(vals): + info["time"] = val_list[1] + + info["signal"] = val_list[0] + + return info + + +def _parse_switches_str_to_dict(switches_str): + out = {} + vals = str(switches_str.split("@")) + if len(vals) > 1: + out["max_wait_time"] = timestr_to_secs(vals[1]) + + out["count"] = u32(vals[0]) + + return out + + +def _parse_cpu_freq_str_to_dict(freq_str): + freq_splitted = re.split("[-:]+", str(freq_str)) + freq_len = len(freq_splitted) + freq = {} + + # Transform cpu-freq string to the individual components. + if freq_splitted[0].isdigit(): + freq["max"] = freq_splitted[0] + else: + if freq_len > 1: + raise ValueError( + "Invalid cpu_frequency format: {kwargs}." 
+ "Governor must be provided as single element or " + "as last element in the form of min-max:governor. " + ) + freq["governor"] = freq_splitted[0] + + if freq_len >= 2: + freq["min"] = freq["max"] + freq["max"] = freq_splitted[1] + + if freq_len == 3: + freq["governor"] = freq_splitted[2] + + return freq + + +def _validate_cpu_freq(freq): + have_no_range = False + freq_min = cpu_freq_str_to_int(freq.get("min")) + freq_max = cpu_freq_str_to_int(freq.get("max")) + freq_gov = cpu_gov_str_to_int(freq.get("governor")) + + if freq_min != u32(None): + if freq_max == u32(None): + freq_max = freq_min + freq_min = u32(None) + have_no_range = True + elif freq_max < freq_min: + raise ValueError( + f"min cpu-freq ({freq_min}) must be smaller " + f"than max cpu-freq ({freq_max})" + ) + elif freq_max != u32(None) and freq_min == u32(None): + have_no_range = True + + if have_no_range and freq_gov != u32(None): + raise ValueError( + "Setting Governor when specifying only either one " + "of min or max is not allowed." + ) + + return freq_min, freq_max, freq_gov + + +def _validate_batch_script(script, args=None): + if Path(script).is_file(): + # First assume the caller is passing a path to a script and we try + # to load it. + script = Path(script).read_text() + else: + if args: + raise ValueError("Passing arguments to a script is only allowed " + "if it was loaded from a file.") + + # Validate the script + if not script or not len(script): + raise ValueError("Batch script is empty or none was provided.") + elif script.isspace(): + raise ValueError("Batch script contains only whitespace.") + elif not script.startswith("#!"): + msg = "Not a valid Batch script. " + msg += "First line must start with '#!'," + msg += "followed by the path to an interpreter" + raise ValueError(msg) + elif "\0" in script: + msg = "The Slurm Controller does not allow scripts that " + msg += "contain a NULL character: '\\0'." 
+ raise ValueError(msg) + elif "\r\n" in script: + msg = "Batch script contains DOS line breaks (\\r\\n) " + msg += "instead of expected UNIX line breaks (\\n)." + raise ValueError(msg) + + return script diff --git a/pyslurm/core/job/util.pyx b/pyslurm/core/job/util.pyx index 31a31638..aedd43d3 100644 --- a/pyslurm/core/job/util.pyx +++ b/pyslurm/core/job/util.pyx @@ -30,10 +30,9 @@ from pyslurm.utils.uint cimport * # Note: Maybe consider using libslurmfull again to avoid having to reimplement # some of these functions and keeping track for changes in new releases. -def mail_type_list_to_int(mail_types): +def mail_type_list_to_int(types): """Convert a str or list of mail types to a uint16_t.""" cdef uint16_t flags = 0 - types = mail_types if not types or "None" == types: return slurm.NO_VAL16 @@ -41,42 +40,31 @@ def mail_type_list_to_int(mail_types): if isinstance(types, str): types = types.split(",") - for typ in mail_types: + for typ in types: typ = typ.casefold() if "array_tasks" == typ: flags |= slurm.MAIL_ARRAY_TASKS - elif "begin" == typ: flags |= slurm.MAIL_JOB_BEGIN - elif "end" == typ: flags |= slurm.MAIL_JOB_END - elif "fail" == typ: flags |= slurm.MAIL_JOB_FAIL - # elif "invalid_depend" == typ: # flags |= slurm.MAIL_INVALID_DEPEND - elif "requeue" == typ: flags |= slurm.MAIL_JOB_REQUEUE - elif "stage_out" == typ: flags |= slurm.MAIL_JOB_STAGE_OUT - elif "time_limit" == typ: flags |= slurm.MAIL_JOB_TIME100 - elif "time_limit_90" == typ: flags |= slurm.MAIL_JOB_TIME90 - elif "time_limit_80" == typ: flags |= slurm.MAIL_JOB_TIME80 - elif "time_limit_50" == typ: flags |= slurm.MAIL_JOB_TIME50 - elif "all" == typ: flags |= (slurm.MAIL_JOB_BEGIN | slurm.MAIL_JOB_END @@ -97,65 +85,68 @@ def mail_type_int_to_list(uint16_t typ): return types if typ & slurm.MAIL_ARRAY_TASKS: - types.append("array_tasks") + types.append("ARRAY_TASKS") # if typ & slurm.MAIL_INVALID_DEPEND: # types.append("invalid_depend") if typ & slurm.MAIL_JOB_BEGIN: - types.append("begin") + 
types.append("BEGIN") if typ & slurm.MAIL_JOB_END: - types.append("end") + types.append("END") if typ & slurm.MAIL_JOB_FAIL: - types.append("fail") + types.append("FAIL") if typ & slurm.MAIL_JOB_REQUEUE: - types.append("requeue") + types.append("REQUEUE") if typ & slurm.MAIL_JOB_STAGE_OUT: - types.append("stage_out") + types.append("STAGE_OUT") if typ & slurm.MAIL_JOB_TIME50: - types.append("time_limit_50") + types.append("TIME_LIMIT_50") if typ & slurm.MAIL_JOB_TIME80: - types.append("time_limit_80") + types.append("TIME_LIMIT_80") if typ & slurm.MAIL_JOB_TIME90: - types.append("time_limit_90") + types.append("TIME_LIMIT_90") if typ & slurm.MAIL_JOB_TIME100: - types.append("time_limit_100") + types.append("TIME_LIMIT_100") return types -def acctg_profile_list_to_int(acctg_profiles): +def acctg_profile_list_to_int(types): """Convert a str or list of accounting gather profiles to uin32_t.""" cdef uint32_t profile = 0 - profiles = acctg_profiles - if not acctg_profiles: + if not types: return slurm.NO_VAL - if "none" in acctg_profiles: - return slurm.ACCT_GATHER_PROFILE_NONE - elif "all" in acctg_profiles: - return slurm.ACCT_GATHER_PROFILE_ALL - - if "energy" in acctg_profiles: - profile |= slurm.ACCT_GATHER_PROFILE_ENERGY - - if "task" in acctg_profiles: - profile |= slurm.ACCT_GATHER_PROFILE_TASK + if isinstance(types, str): + types = types.split(",") - if "lustre" in acctg_profiles: - profile |= slurm.ACCT_GATHER_PROFILE_LUSTRE + for typ in types: + typ = typ.casefold() - if "network" in acctg_profiles: - profile |= slurm.ACCT_GATHER_PROFILE_NETWORK + if "energy" == typ: + profile |= slurm.ACCT_GATHER_PROFILE_ENERGY + elif "task" == typ: + profile |= slurm.ACCT_GATHER_PROFILE_TASK + elif "lustre" == typ: + profile |= slurm.ACCT_GATHER_PROFILE_LUSTRE + elif "network" == typ: + profile |= slurm.ACCT_GATHER_PROFILE_NETWORK + elif "none" == typ: + return slurm.ACCT_GATHER_PROFILE_NONE + elif "all" == typ: + return slurm.ACCT_GATHER_PROFILE_ALL + else: + raise 
ValueError("Invalid profile type: {typ}.") return profile @@ -168,34 +159,44 @@ def acctg_profile_int_to_list(flags): return [] if flags == slurm.ACCT_GATHER_PROFILE_ALL: - return ["all"] + return ["ALL"] elif flags == slurm.ACCT_GATHER_PROFILE_NONE: return [] if flags & slurm.ACCT_GATHER_PROFILE_ENERGY: - profiles.append("energy") + profiles.append("ENERGY") if flags & slurm.ACCT_GATHER_PROFILE_TASK: - profiles.append("task") + profiles.append("TASK") if flags & slurm.ACCT_GATHER_PROFILE_LUSTRE: - profiles.append("lustre") + profiles.append("LUSTRE") if flags & slurm.ACCT_GATHER_PROFILE_NETWORK: - profiles.append("network") + profiles.append("NETWORK") return profiles -def power_type_list_to_int(power_types): +def power_type_list_to_int(types): """Convert a str or list of str with power types to uint8_t.""" cdef uint8_t flags = 0 - if not power_types: + if not types: return slurm.NO_VAL8 - if "level" in power_types: - flags |= slurm.SLURM_POWER_FLAGS_LEVEL + if isinstance(types, str): + types = types.split(",") + + for typ in types: + typ = typ.casefold() + + if "level" == typ: + flags |= slurm.SLURM_POWER_FLAGS_LEVEL + else: + raise ValueError("Invalid power type: {typ}.") + + return flags def power_type_int_to_list(flags): @@ -203,7 +204,7 @@ def power_type_int_to_list(flags): types = [] if flags & slurm.SLURM_POWER_FLAGS_LEVEL: - types.append("level") + types.append("LEVEL") return types @@ -255,7 +256,7 @@ def cpu_gov_str_to_int(gov): def cpu_freq_str_to_int(freq): """Convert a cpu-frequency str to its numerical representation.""" if not freq: - return u32(None) + return slurm.NO_VAL if isinstance(freq, str) and not freq.isdigit(): freq = freq.casefold() @@ -279,25 +280,25 @@ def cpu_freq_str_to_int(freq): def cpu_freq_int_to_str(freq): """Convert a numerical cpufreq value to its string representation.""" if freq == slurm.CPU_FREQ_LOW: - return "Low" + return "LOW" elif freq == slurm.CPU_FREQ_MEDIUM: - return "Medium" + return "MEDIUM" elif freq == 
slurm.CPU_FREQ_HIGHM1: - return "Highm1" + return "HIGHM1" elif freq == slurm.CPU_FREQ_HIGH: - return "High" + return "HIGH" elif freq == slurm.CPU_FREQ_CONSERVATIVE: - return "Conservative" + return "CONSERVATIVE" elif freq == slurm.CPU_FREQ_PERFORMANCE: - return "Performance" + return "PERFORMANCE" elif freq == slurm.CPU_FREQ_POWERSAVE: - return "PowerSave" + return "POWERSAVE" elif freq == slurm.CPU_FREQ_USERSPACE: - return "UserSpace" + return "USERSPACE" elif freq == slurm.CPU_FREQ_ONDEMAND: - return "OnDemand" + return "ONDEMAND" elif freq == slurm.CPU_FREQ_SCHEDUTIL: - return "SchedUtil" + return "SCHEDUTIL" elif freq & slurm.CPU_FREQ_RANGE_FLAG: return None elif freq == slurm.NO_VAL or freq == 0: diff --git a/pyslurm/db/job.pxd b/pyslurm/db/job.pxd index 28ba5423..b592c0f6 100644 --- a/pyslurm/db/job.pxd +++ b/pyslurm/db/job.pxd @@ -135,7 +135,7 @@ cdef class JobSearchFilter: max_cpus nodes max_nodes - qualities_of_service + qos names partitions groups @@ -229,7 +229,7 @@ cdef class Job: Name of the Partition for this Job priority (int): Priority for the Job - quality_of_service (str): + qos (str): Name of the Quality of Service for the Job cpus (int): Amount of CPUs the Job has/had allocated, or, if the Job is still diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index 50fd0c8c..45eee895 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -69,12 +69,12 @@ cdef class JobSearchFilter: self.ptr.flags |= slurm.JOBCOND_FLAG_NO_TRUNC def _parse_qos(self): - if not self.qualities_of_service: + if not self.qos: return None qos_id_list = [] qos = QualitiesOfService.load() - for q in self.qualities_of_service: + for q in self.qos: if isinstance(q, int): qos_id_list.append(q) elif q in qos: @@ -519,7 +519,7 @@ cdef class Job: return u32_parse(self.ptr.priority, zero_is_noval=False) @property - def quality_of_service(self): + def qos(self): _qos = self.qos_data.get(self.ptr.qosid, None) if _qos: return _qos.name diff --git a/pyslurm/pyslurm.pyx 
b/pyslurm/pyslurm.pyx index 89b226a2..0fbf0ab7 100644 --- a/pyslurm/pyslurm.pyx +++ b/pyslurm/pyslurm.pyx @@ -56,6 +56,7 @@ import builtins as __builtin__ from pyslurm cimport slurm from pyslurm.slurm cimport xmalloc +import pyslurm.core.job include "pydefines/slurm_errno_defines.pxi" include "pydefines/slurm_errno_enums.pxi" @@ -2362,17 +2363,6 @@ cdef class job: Returns: (str): The content of the batch script. """ - # This reimplements the slurm_job_batch_script API call. Otherwise we - # would have to parse the FILE* ptr we get from it back into a - # char* which would be a bit silly. Source: - # https://github.com/SchedMD/slurm/blob/7162f15af8deaf02c3bbf940d59e818cdeb5c69d/src/api/job_info.c#L1319 - cdef: - slurm.job_id_msg_t msg - slurm.slurm_msg_t req - slurm.slurm_msg_t resp - int rc = slurm.SLURM_SUCCESS - str script = None - if isinstance(jobid, int) or isinstance(jobid, long): jobid = str(jobid).encode("UTF-8") else: @@ -2380,41 +2370,7 @@ cdef class job: jobid_xlate = slurm.slurm_xlate_job_id(jobid) - slurm.slurm_msg_t_init(&req) - slurm.slurm_msg_t_init(&resp) - - memset(&msg, 0, sizeof(msg)) - msg.job_id = jobid_xlate - req.msg_type = slurm.REQUEST_BATCH_SCRIPT - req.data = &msg - - rc = slurm.slurm_send_recv_controller_msg(&req, &resp, - slurm.working_cluster_rec) - if rc < 0: - err = slurm.slurm_get_errno() - raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(err), ''), - err) - - if resp.msg_type == slurm.RESPONSE_BATCH_SCRIPT: - script = slurm.stringOrNone(resp.data, None) - slurm.xfree(resp.data) - - elif resp.msg_type == slurm.RESPONSE_SLURM_RC: - rc = ( resp.data).return_code - slurm.slurm_free_return_code_msg(resp.data) - - if rc == slurm.SLURM_ERROR: - rc = slurm.slurm_get_errno() - - raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(rc), ''), - rc) - - else: - rc = slurm.slurm_get_errno() - raise ValueError(slurm.stringOrNone(slurm.slurm_strerror(rc), ''), - rc) - - return script + return 
pyslurm.core.job.Job(jobid_xlate).get_batch_script() cdef int fill_job_desc_from_opts(self, dict job_opts, slurm.job_desc_msg_t *desc): """ diff --git a/pyslurm/utils/cstr.pyx b/pyslurm/utils/cstr.pyx index 8301c994..12a39ecb 100644 --- a/pyslurm/utils/cstr.pyx +++ b/pyslurm/utils/cstr.pyx @@ -30,7 +30,7 @@ cdef bytes NONE_BYTE = "None".encode("ascii") cdef char *from_unicode(s): """Convert Python3 str (unicode) to char* (no malloc) - Note + Note: The lifetime of this char* depends on the lifetime of the equivalent python-object passed in. If the python-object is gone, the char* cannot be used safely anymore. @@ -173,7 +173,7 @@ def dict_to_str(vals, prepend=None, delim1=",", delim2="="): for Key/Value type things, which can be easily created from a dict. A String which already has this form can also be passed in. The correct - format of this string will the be validated. + format of this string will then be validated. """ cdef: tmp_dict = {} if not vals else vals @@ -273,7 +273,7 @@ def from_gres_dict(vals, typ=""): raise ValueError(f"Invalid specifier: '{gres_and_type}'") if typ not in gres_and_type: - gres_and_type = f"{gres_and_type}:{typ}" + gres_and_type = f"{typ}:{gres_and_type}" final.append(f"gres:{gres_and_type}:{int(cnt)}") diff --git a/tests/integration/test_job_submit.py b/tests/integration/test_job_submit.py index d2f7c98b..0626d1c1 100644 --- a/tests/integration/test_job_submit.py +++ b/tests/integration/test_job_submit.py @@ -20,10 +20,8 @@ # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
"""test_job_submit.py - Test the job submit api functions.""" -import time import pytest import pyslurm -from os import environ as pyenviron from util import create_simple_job_desc, create_job_script from pyslurm import ( Job, @@ -32,12 +30,24 @@ RPCError, ) - -def test_submit_example1(): - # TODO - assert True +def job_desc(**kwargs): + return JobSubmitDescription(script=create_job_script(), **kwargs) -def test_submit_example2(): - # TODO - assert True +def test_submit_example1(): + desc = job_desc() + desc.name = "test1" + desc.working_directory = "/tmp" + desc.qos = "normal" + desc.standard_output = "/tmp/test1.out" + desc.standard_error = "/tmp/test1.err" + desc.ntasks = 2 + desc.cpus_per_task = 2 + desc.resource_sharing = "yes" + desc.memory_per_cpu = "2G" + desc.time_limit = 10 + desc.nice = 500 + desc.distribution = "block:block:cyclic" + desc.is_requeueable = True + desc.kill_on_node_fail = True + desc.submit() diff --git a/tests/unit/test_common.py b/tests/unit/test_common.py index e8bf85a2..dd812665 100644 --- a/tests/unit/test_common.py +++ b/tests/unit/test_common.py @@ -22,7 +22,7 @@ import pyslurm import pytest -import datetime +from datetime import datetime from pyslurm import Job, JobSubmitDescription, Node from pyslurm.utils.ctime import ( timestr_to_mins, @@ -59,9 +59,9 @@ from pyslurm.utils import cstr -class TestTypes: +class TestStrings: - def test_strings(self): + def test_fmalloc(self): n = Node() n.name = "Testing fmalloc string routines." 
@@ -132,15 +132,26 @@ def test_dict_to_gres_str(self): expected_str = "gres:gpu:tesla:3" assert cstr.from_gres_dict(input_dict) == expected_str assert cstr.from_gres_dict(expected_str) == expected_str + assert cstr.from_gres_dict("gpu:tesla:3") == expected_str input_dict = {"gpu": 3} expected_str = "gres:gpu:3" assert cstr.from_gres_dict(input_dict) == expected_str assert cstr.from_gres_dict(expected_str) == expected_str + assert cstr.from_gres_dict("gpu:3") == expected_str + + input_dict = {"tesla": 3, "a100": 5} + expected_str = "gres:gpu:tesla:3,gres:gpu:a100:5" + assert cstr.from_gres_dict(input_dict, "gpu") == expected_str + assert cstr.from_gres_dict(expected_str) == expected_str + assert cstr.from_gres_dict("tesla:3,a100:5", "gpu") == expected_str def test_str_to_gres_dict(self): assert True + +class TestUint: + def _uint_impl(self, func_set, func_get, typ): val = func_set(2**typ-2) assert func_get(val) == None @@ -248,12 +259,10 @@ def test_parse_seconds(self): timestr_to_secs("invalid_val") def test_parse_date(self): - timestamp = 1667941697 - date = "2022-11-08T21:08:17" - datetime_date = datetime.datetime(2022, 11, 8, 21, 8, 17) + datetime_date = datetime(2022, 11, 8, 21, 8, 17) + timestamp = int(datetime_date.timestamp()) + date = datetime_date.isoformat(timespec="seconds") - # Converting date str to timestamp with the slurm API functions may - # not yield the expected timestamp above due to using local time zone assert date_to_timestamp(date) == timestamp assert date_to_timestamp(timestamp) == timestamp assert date_to_timestamp(datetime_date) == timestamp @@ -267,6 +276,7 @@ def test_parse_date(self): match="Invalid Time Specification: 2022-11-08T21"): date_to_timestamp("2022-11-08T21") + class TestMiscUtil: def test_parse_uid(self): diff --git a/tests/unit/test_job_submit.py b/tests/unit/test_job_submit.py index d0daf41b..5720f75f 100644 --- a/tests/unit/test_job_submit.py +++ b/tests/unit/test_job_submit.py @@ -28,18 +28,36 @@ import os from os 
import environ as pyenviron from util import create_simple_job_desc, create_job_script +from pyslurm.utils.uint import u32 from pyslurm import ( Job, Jobs, JobSubmitDescription, RPCError, ) +from pyslurm.core.job.submission import ( + _parse_cpu_freq_str_to_dict, + _validate_cpu_freq, + _parse_nodes, + _parse_dependencies, + _parse_signal_str_to_dict, + _validate_batch_script, +) +from pyslurm.core.job.util import ( + mail_type_list_to_int, + acctg_profile_list_to_int, + cpu_freq_str_to_int, + cpu_gov_str_to_int, + shared_type_str_to_int, + power_type_list_to_int, +) + def job_desc(**kwargs): return JobSubmitDescription(script=create_job_script(), **kwargs) -def test_environment(): +def test_parse_environment(): job = job_desc() # Everything in the current environment will be exported @@ -58,91 +76,84 @@ def test_environment(): # } -def test_cpu_frequencyuency(): - job = job_desc() - job._create_job_submit_desc() - - job.cpu_frequency = "Performance" - job._create_job_submit_desc() - - job.cpu_frequency = {"governor": "Performance"} - job._create_job_submit_desc() - - job.cpu_frequency = 1000000 - job._create_job_submit_desc() - - job.cpu_frequency = {"max": 1000000} - job._create_job_submit_desc() - - job.cpu_frequency = "1000000-3700000" - job._create_job_submit_desc() - - job.cpu_frequency = {"min": 1000000, "max": 3700000} - job._create_job_submit_desc() - - job.cpu_frequency = "1000000-3700000:Performance" - job._create_job_submit_desc() - - job.cpu_frequency = {"min": 1000000, "max": 3700000, - "governor": "Performance"} - job._create_job_submit_desc() +def test_parse_cpu_frequency(): + freq = "Performance" + freq_dict = _parse_cpu_freq_str_to_dict(freq) + assert freq_dict["governor"] == "Performance" + assert len(freq_dict) == 1 + _validate_cpu_freq(freq_dict) + + freq = 1000000 + freq_dict = _parse_cpu_freq_str_to_dict(freq) + assert freq_dict["max"] == "1000000" + assert len(freq_dict) == 1 + _validate_cpu_freq(freq_dict) + + freq = "1000000-3700000" + 
freq_dict = _parse_cpu_freq_str_to_dict(freq) + assert freq_dict["min"] == "1000000" + assert freq_dict["max"] == "3700000" + assert len(freq_dict) == 2 + _validate_cpu_freq(freq_dict) + + freq = "1000000-3700000:Performance" + freq_dict = _parse_cpu_freq_str_to_dict(freq) + assert freq_dict["min"] == "1000000" + assert freq_dict["max"] == "3700000" + assert freq_dict["governor"] == "Performance" + _validate_cpu_freq(freq_dict) with pytest.raises(ValueError, match=r"Invalid cpu_frequency format*"): - job.cpu_frequency = "Performance:3700000" - job._create_job_submit_desc() + freq = "Performance:3700000" + freq_dict = _parse_cpu_freq_str_to_dict(freq) with pytest.raises(ValueError, match=r"min cpu-freq*"): - job.cpu_frequency = "4000000-3700000" - job._create_job_submit_desc() + freq = "4000000-3700000" + freq_dict = _parse_cpu_freq_str_to_dict(freq) + _validate_cpu_freq(freq_dict) - with pytest.raises(ValueError, - match=r"Invalid cpu freq value*"): - job.cpu_frequency = "3700000:Performance" - job._create_job_submit_desc() +# with pytest.raises(ValueError, +# match=r"Invalid cpu freq value*"): +# freq = "3700000:Performance" +# job._create_job_submit_desc() with pytest.raises(ValueError, match=r"Setting Governor when specifying*"): - job.cpu_frequency = {"max": 3700000, "governor": "Performance"} - job._create_job_submit_desc() + freq = {"max": 3700000, "governor": "Performance"} + _validate_cpu_freq(freq) with pytest.raises(ValueError, match=r"Setting Governor when specifying*"): - job.cpu_frequency = {"min": 3700000, "governor": "Performance"} - job._create_job_submit_desc() + freq = {"min": 3700000, "governor": "Performance"} + _validate_cpu_freq(freq) -def test_nodes(): - job = job_desc() - job._create_job_submit_desc() - - job.nodes = "5" - job._create_job_submit_desc() +def test_parse_nodes(): + nodes = "5" + nmin, nmax = _parse_nodes(nodes) + assert nmin == 5 + assert nmax == 5 - job.nodes = {"min": 5, "max": 5} - job._create_job_submit_desc() + nodes = 
{"min": 5, "max": 5} + nmin, nmax = _parse_nodes(nodes) + assert nmin == 5 + assert nmax == 5 - job.nodes = "5-10" - job._create_job_submit_desc() - - job.nodes = {"min": 5, "max": 10} - job._create_job_submit_desc() + nodes = "5-10" + nmin, nmax = _parse_nodes(nodes) + assert nmin == 5 + assert nmax == 10 with pytest.raises(ValueError, match=r"Max Nodecount cannot be less than*"): - job.nodes = {"min": 10, "max": 5} - job._create_job_submit_desc() + nodes = {"min": 10, "max": 5} + nmin, nmax = _parse_nodes(nodes) -def test_script(): - job = job_desc() +def test_parse_script(): script = create_job_script() - job._create_job_submit_desc() - - job.script = script - assert job.script == script - assert job.script_args is None # Try passing in a path to a script. fd, path = tempfile.mkstemp() @@ -150,104 +161,148 @@ def test_script(): with os.fdopen(fd, 'w') as tmp: tmp.write(script) - job.script = path - job.script_args = "-t 10 input.csv" - job._create_job_submit_desc() + _validate_batch_script(path, "-t 10 input.csv") finally: os.remove(path) with pytest.raises(ValueError, match=r"Passing arguments to a script*"): - job.script = "#!/bin/bash\nsleep 10" - job.script_args = "-t 10" - job._create_job_submit_desc() + script = "#!/bin/bash\nsleep 10" + script_args = "-t 10" + _validate_batch_script(script, script_args) with pytest.raises(ValueError, match=r"The Slurm Controller does not allow*"): - job.script = script + "\0" - job.script_args = None - job._create_job_submit_desc() + script = "#!/bin/bash\nsleep 10" + "\0" + script_args = None + _validate_batch_script(script, script_args) with pytest.raises(ValueError, - match="You need to provide a batch script."): - job.script = "" - job.script_args = None - job._create_job_submit_desc() + match="Batch script is empty or none was provided."): + script = "" + script_args = None + _validate_batch_script(script, script_args) with pytest.raises(ValueError, match=r"Batch script contains DOS line breaks*"): - job.script = 
script + "\r\n" - job.script_args = None - job._create_job_submit_desc() - - -def test_dependencies(): - job = job_desc() - job._create_job_submit_desc() + script = "#!/bin/bash\nsleep 10" + "\r\n" + script_args = None + _validate_batch_script(script, script_args) - job.dependencies = "after:70:90:60+30,afterok:80" - job._create_job_submit_desc() - job.dependencies = "after:70:90:60?afterok:80" - job._create_job_submit_desc() - - job.dependencies = { +def test_parse_dependencies(): + dep = { "afterany": [40, 30, 20], "afternotok": [100], "satisfy": "any", "singleton": True, } - job._create_job_submit_desc() + dep_str = _parse_dependencies(dep) + assert dep_str == "afterany:40:30:20?afternotok:100?singleton" + dep = { + "after": [100, "200+30"], + "afterok": [300], + } + dep_str = _parse_dependencies(dep) + assert dep_str == "after:100:200+30,afterok:300" + + dep = { + "after": 200, + "afterok": 300, + } + dep_str = _parse_dependencies(dep) + assert dep_str == "after:200,afterok:300" -def test_cpus(): - job = job_desc() - job._create_job_submit_desc() +def test_validate_cpus(): + job = job_desc() job.cpus_per_task = 5 - job._create_job_submit_desc() + job._validate_options() with pytest.raises(ValueError, match="cpus_per_task and cpus_per_gpu are mutually exclusive."): job.cpus_per_gpu = 5 - job._create_job_submit_desc() + job._validate_options() job.cpus_per_task = None job.cpus_per_gpu = 5 - job._create_job_submit_desc() + job._validate_options() with pytest.raises(ValueError, match="cpus_per_task and cpus_per_gpu are mutually exclusive."): job.cpus_per_task = 5 - job._create_job_submit_desc() + job._validate_options() -def test_gres_per_node(): - job = job_desc() - job._create_job_submit_desc() +def test_parse_signal(): + signal = 7 + signal_dict = _parse_signal_str_to_dict(signal) + assert signal_dict["signal"] == "7" + assert len(signal_dict) == 1 - job.gres_per_node = "gpu:tesla:1,gpu:volta:5" - job._create_job_submit_desc() + signal = "7@120" + signal_dict = 
_parse_signal_str_to_dict(signal) + assert signal_dict["signal"] == "7" + assert signal_dict["time"] == "120" + assert len(signal_dict) == 2 - job.gres_per_node = {"gpu:tesla": 1, "gpu:volta": 1} - job._create_job_submit_desc() + signal = "RB:8@180" + signal_dict = _parse_signal_str_to_dict(signal) + assert signal_dict["signal"] == "8" + assert signal_dict["time"] == "180" + assert signal_dict["batch_only"] + assert signal_dict["allow_reservation_overlap"] + assert len(signal_dict) == 4 -def test_signal(): - job = job_desc() - job._create_job_submit_desc() +def test_mail_type_list_to_int(): + typ = "ARRAY_TASKS,BEGIN" + assert mail_type_list_to_int(typ) > 0 - job.signal = 7 - job._create_job_submit_desc() + with pytest.raises(ValueError, match=r"Invalid *"): + typ = "BEGIN,END,INVALID_TYPE" + mail_type_list_to_int(typ) - job.signal = {"batch_only": True} - job._create_job_submit_desc() - job.signal = "7@120" - job._create_job_submit_desc() +def test_acctg_profile_list_to_int(): + typ = "energy,task" + assert acctg_profile_list_to_int(typ) > 0 - job.signal = "RB:8@180" - job._create_job_submit_desc() + with pytest.raises(ValueError, match=r"Invalid *"): + typ = "energy,invalid_type" + acctg_profile_list_to_int(typ) + + +def test_power_type_list_to_int(): + typ = "level" + assert power_type_list_to_int(typ) > 0 + + with pytest.raises(ValueError, match=r"Invalid *"): + typ = "invalid_type" + power_type_list_to_int(typ) + + +def test_cpu_gov_str_to_int(): + typ = "PERFORMANCE" + assert cpu_gov_str_to_int(typ) > 0 + + with pytest.raises(ValueError, match=r"Invalid *"): + typ = "INVALID_GOVERNOR" + cpu_gov_str_to_int(typ) + + +def test_cpu_freq_str_to_int(): + typ = "HIGH" + assert cpu_freq_str_to_int(typ) > 0 + + with pytest.raises(ValueError, match=r"Invalid *"): + typ = "INVALID_FREQ_STR" + cpu_freq_str_to_int(typ) + + with pytest.raises(OverflowError): + typ = 2**32 + cpu_freq_str_to_int(typ) def test_setting_attrs_with_env_vars(): @@ -267,14 +322,22 @@ def 
test_setting_attrs_with_env_vars(): assert job.wckey == "wckey" assert job.clusters == "cluster1,cluster2" assert job.comment == "A simple job comment" + assert job.requires_contiguous_nodes == True assert job.working_directory == "/work/user2" + + job = job_desc(working_directory="/work/user2", account="account2") + job.load_environment(overwrite=True) + + assert job.account == "account1" + assert job.name == "jobname" + assert job.wckey == "wckey" + assert job.clusters == "cluster1,cluster2" + assert job.comment == "A simple job comment" assert job.requires_contiguous_nodes == True - job._create_job_submit_desc() + assert job.working_directory == "/work/user1" def test_parsing_sbatch_options_from_script(): - job = job_desc(working_directory="/work/user2") - fd, path = tempfile.mkstemp() try: with os.fdopen(fd, 'w') as tmp: @@ -292,15 +355,25 @@ def test_parsing_sbatch_options_from_script(): """ ) + job = job_desc(ntasks=5) job.script = path job.load_sbatch_options() assert job.time_limit == "20" assert job.memory_per_cpu == "1G" assert job.gpus == "1" assert job.resource_sharing == "no" + assert job.ntasks == 5 + assert job.cpus_per_task == "3" + + job = job_desc(ntasks=5) + job.script = path + job.load_sbatch_options(overwrite=True) + assert job.time_limit == "20" + assert job.memory_per_cpu == "1G" + assert job.gpus == "1" + assert job.resource_sharing == "no" assert job.ntasks == "2" assert job.cpus_per_task == "3" - job._create_job_submit_desc() finally: os.remove(path) From 538bf424c0a008175338115c974c74e31af10296 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Thu, 18 May 2023 21:54:27 +0200 Subject: [PATCH 22/48] Start maintaining a changelog (#293) * Start maintaining a Changelog - include it in the docs * bump version to 23.2.1 --- CHANGELOG.md | 56 ++++++++++++++++++++++++++++++++++++++++++ docs/changelog.md | 1 + mkdocs.yml | 6 +++++ pyslurm/__version__.py | 2 +- setup.py | 2 +- 5 files changed, 65 insertions(+), 
2 deletions(-) create mode 100644 CHANGELOG.md create mode 100644 docs/changelog.md diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..18aa6787 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,56 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [23.2.1](https://github.com/PySlurm/pyslurm/releases/tag/v23.2.1) - 2023-05-18 + +### Added + +- Classes to interact with the Job and Submission API + - [pyslurm.Job](https://pyslurm.github.io/23.2/reference/job/#pyslurm.Job) + - [pyslurm.Jobs](https://pyslurm.github.io/23.2/reference/job/#pyslurm.Jobs) + - [pyslurm.JobStep](https://pyslurm.github.io/23.2/reference/jobstep/#pyslurm.JobStep) + - [pyslurm.JobSteps](https://pyslurm.github.io/23.2/reference/jobstep/#pyslurm.JobSteps) + - [pyslurm.JobSubmitDescription](https://pyslurm.github.io/23.2/reference/jobsubmitdescription/#pyslurm.JobSubmitDescription) +- Classes to interact with the Database Job API + - [pyslurm.db.Job](https://pyslurm.github.io/23.2/reference/db/job/#pyslurm.db.Job) + - [pyslurm.db.Jobs](https://pyslurm.github.io/23.2/reference/db/job/#pyslurm.db.Jobs) + - [pyslurm.db.JobStep](https://pyslurm.github.io/23.2/reference/db/jobstep/#pyslurm.db.JobStep) + - [pyslurm.db.JobSearchFilter](https://pyslurm.github.io/23.2/reference/db/jobsearchfilter/#pyslurm.db.JobSearchFilter) +- Classes to interact with the Node API + - [pyslurm.Node](https://pyslurm.github.io/23.2/reference/node/#pyslurm.Node) + - [pyslurm.Nodes](https://pyslurm.github.io/23.2/reference/node/#pyslurm.Nodes) +- Exceptions added: + - [pyslurm.PyslurmError](https://pyslurm.github.io/23.2/reference/exceptions/#pyslurm.PyslurmError) + - [pyslurm.RPCError](https://pyslurm.github.io/23.2/reference/exceptions/#pyslurm.RPCError) +- [Utility 
Functions](https://pyslurm.github.io/23.2/reference/utilities/#pyslurm.utils) + +### Changes + +- Completely overhaul the documentation, switch to mkdocs +- Rework the tests: Split them into unit and integration tests + +### Deprecated + +- Following classes are superseded by new ones: + - [pyslurm.job](https://pyslurm.github.io/23.2/reference/old/job/#pyslurm.job) + - [pyslurm.node](https://pyslurm.github.io/23.2/reference/old/node/#pyslurm.node) + - [pyslurm.jobstep](https://pyslurm.github.io/23.2/reference/old/jobstep/#pyslurm.jobstep) + - [pyslurm.slurmdb_jobs](https://pyslurm.github.io/23.2/reference/old/db/job/#pyslurm.slurmdb_jobs) + +## [23.2.0](https://github.com/PySlurm/pyslurm/releases/tag/v23.2.0) - 2023-04-07 + +### Added + +- Support for Slurm 23.02.x ([f506d63](https://github.com/PySlurm/pyslurm/commit/f506d63634a9b20bfe475534589300beff4a8843)) + +### Removed + +- `Elasticsearch` debug flag from `get_debug_flags` +- `launch_type`, `launch_params` and `slurmctld_plugstack` keys from the + `config.get()` output +- Some constants (mostly `ESLURM_*` constants that do not exist + anymore) diff --git a/docs/changelog.md b/docs/changelog.md new file mode 100644 index 00000000..786b75d5 --- /dev/null +++ b/docs/changelog.md @@ -0,0 +1 @@ +--8<-- "CHANGELOG.md" diff --git a/mkdocs.yml b/mkdocs.yml index 1341b839..daea3007 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -5,6 +5,12 @@ repo_url: "https://github.com/PySlurm/pyslurm" repo_name: "PySlurm/pyslurm" copyright: Copyright © 2023 PySlurm Developers +nav: + - Home: + - Home: index.md + - Changelog: changelog.md + - ... 
+ theme: name: "material" logo: logo.png diff --git a/pyslurm/__version__.py b/pyslurm/__version__.py index 37000654..a3011f6e 100644 --- a/pyslurm/__version__.py +++ b/pyslurm/__version__.py @@ -1 +1 @@ -__version__ = "23.2.0" +__version__ = "23.2.1" diff --git a/setup.py b/setup.py index 7b96fdc8..98032f6c 100644 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ CYTHON_VERSION_MIN = "0.29.30" SLURM_RELEASE = "23.2" -PYSLURM_PATCH_RELEASE = "0" +PYSLURM_PATCH_RELEASE = "1" SLURM_SHARED_LIB = "libslurm.so" CURRENT_DIR = pathlib.Path(__file__).parent From d5b0076a6f8376f679229929b6130e281838e0f0 Mon Sep 17 00:00:00 2001 From: elelayan Date: Wed, 24 May 2023 21:44:46 +0200 Subject: [PATCH 23/48] fix paths in MANIFEST.in (#295) - correctly use README.md instead of README.rst - folder for documentation is now called "docs" instead of "doc" --- MANIFEST.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index bbe17e2a..a8e09b2b 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,8 +1,8 @@ -include README.rst +include README.md include COPYING.txt graft examples graft tests -graft doc +graft docs graft pyslurm/slurm graft pyslurm/pydefines recursive-include pyslurm *.pyx *.px[di] *.h From 8ebc5c52fe0445f818dccd17e8941b2e89e3be8c Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Sat, 27 May 2023 16:03:02 +0200 Subject: [PATCH 24/48] Rework Partition API (#296) - Rework whole Partition API - avoid some duplicate logic in utils/uint.pyx - make the string "UNLIMITED" a constant - include new Partition API in the docs - add tests for new partition API --- docs/reference/constants.md | 7 + docs/reference/old/partition.md | 10 + docs/reference/partition.md | 11 +- docs/reference/utilities.md | 2 +- pyslurm/__init__.py | 2 + pyslurm/constants.py | 32 + pyslurm/core/partition.pxd | 223 +++++++ pyslurm/core/partition.pyx | 866 ++++++++++++++++++++++++++++ pyslurm/core/slurmctld.pxd | 1 + 
pyslurm/core/slurmctld.pyx | 14 + pyslurm/slurm/extra.pxi | 7 + pyslurm/utils/cstr.pxd | 2 +- pyslurm/utils/cstr.pyx | 10 +- pyslurm/utils/ctime.pyx | 25 +- pyslurm/utils/helpers.pyx | 3 +- pyslurm/utils/uint.pxd | 8 +- pyslurm/utils/uint.pyx | 103 ++-- tests/integration/test_partition.py | 89 +++ tests/integration/util.py | 6 + tests/unit/test_common.py | 42 +- tests/unit/test_partition.py | 98 ++++ 21 files changed, 1478 insertions(+), 83 deletions(-) create mode 100644 docs/reference/constants.md create mode 100644 docs/reference/old/partition.md create mode 100644 pyslurm/constants.py create mode 100644 pyslurm/core/partition.pxd create mode 100644 pyslurm/core/partition.pyx create mode 100644 tests/integration/test_partition.py create mode 100644 tests/unit/test_partition.py diff --git a/docs/reference/constants.md b/docs/reference/constants.md new file mode 100644 index 00000000..dd659b4c --- /dev/null +++ b/docs/reference/constants.md @@ -0,0 +1,7 @@ +--- +title: constants +--- + +::: pyslurm.constants + handler: python + diff --git a/docs/reference/old/partition.md b/docs/reference/old/partition.md new file mode 100644 index 00000000..0e69bbfb --- /dev/null +++ b/docs/reference/old/partition.md @@ -0,0 +1,10 @@ +--- +title: Partition +--- + +!!! warning + This class is superseded by [pyslurm.Partition](../partition.md) and will + be removed in a future release. + +::: pyslurm.partition + handler: python diff --git a/docs/reference/partition.md b/docs/reference/partition.md index 6ab4b865..b9701f55 100644 --- a/docs/reference/partition.md +++ b/docs/reference/partition.md @@ -2,9 +2,12 @@ title: Partition --- -!!! warning - This API is currently being completely reworked, and is subject to be - removed in the future when a replacement is introduced +!!! 
note + This supersedes the [pyslurm.partition](old/partition.md) class, which + will be removed in a future release -::: pyslurm.partition +::: pyslurm.Partition + handler: python + +::: pyslurm.Partitions handler: python diff --git a/docs/reference/utilities.md b/docs/reference/utilities.md index b290d055..63eb7bc0 100644 --- a/docs/reference/utilities.md +++ b/docs/reference/utilities.md @@ -1,5 +1,5 @@ --- -title: Utilities +title: utils --- ::: pyslurm.utils diff --git a/pyslurm/__init__.py b/pyslurm/__init__.py index 750199da..06bd804b 100644 --- a/pyslurm/__init__.py +++ b/pyslurm/__init__.py @@ -14,6 +14,7 @@ from pyslurm import utils from pyslurm import db +from pyslurm import constants from pyslurm.core.job import ( Job, @@ -23,6 +24,7 @@ JobSubmitDescription, ) from pyslurm.core.node import Node, Nodes +from pyslurm.core.partition import Partition, Partitions from pyslurm.core import error from pyslurm.core.error import ( PyslurmError, diff --git a/pyslurm/constants.py b/pyslurm/constants.py new file mode 100644 index 00000000..0b3c11b0 --- /dev/null +++ b/pyslurm/constants.py @@ -0,0 +1,32 @@ +######################################################################### +# constants.py - pyslurm constants used throughout the project +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# Copyright (C) 2023 PySlurm Developers +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 +"""pyslurm common Constants""" + + +UNLIMITED = "UNLIMITED" +""" +Represents an infinite/unlimited value. This is sometimes returned for +specific attributes as a value to indicate that there is no restriction for it. +""" diff --git a/pyslurm/core/partition.pxd b/pyslurm/core/partition.pxd new file mode 100644 index 00000000..9baeba62 --- /dev/null +++ b/pyslurm/core/partition.pxd @@ -0,0 +1,223 @@ +######################################################################### +# partition.pxd - interface to work with partitions in slurm +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# Copyright (C) 2023 PySlurm Developers +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from libc.string cimport memcpy, memset +from pyslurm cimport slurm +from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t +from pyslurm.slurm cimport ( + partition_info_msg_t, + job_defaults_t, + delete_part_msg_t, + partition_info_t, + update_part_msg_t, + slurm_free_partition_info_members, + slurm_free_partition_info_msg, + slurm_free_update_part_msg, + slurm_init_part_desc_msg, + slurm_load_partitions, + slurm_sprint_cpu_bind_type, + cpu_bind_type_t, + slurm_preempt_mode_string, + slurm_preempt_mode_num, + slurm_create_partition, + slurm_update_partition, + slurm_delete_partition, + xfree, + try_xmalloc, +) +from pyslurm.db.util cimport ( + SlurmList, + SlurmListItem, +) +from pyslurm.utils cimport cstr +from pyslurm.utils cimport ctime +from pyslurm.utils.ctime cimport time_t +from pyslurm.utils.uint cimport * +from pyslurm.core cimport slurmctld + + +cdef class Partitions(dict): + """A collection of [pyslurm.Partition][] objects. + + Args: + partitions (Union[list[str], dict[str, Partition], str], optional=None): + Partitions to initialize this collection with. + + Attributes: + total_cpus (int): + Total amount of CPUs the Partitions in a Collection have + total_nodes (int): + Total amount of Nodes the Partitions in a Collection have + """ + cdef: + partition_info_msg_t *info + partition_info_t tmp_info + + +cdef class Partition: + """A Slurm partition. + + ??? info "Setting Memory related attributes" + + Unless otherwise noted, all attributes in this class representing a + memory value, like `default_memory_per_cpu`, may also be set with a + string that contains suffixes like "K", "M", "G" or "T". 
+ + For example: + + default_memory_per_cpu = "10G" + + This will internally be converted to 10240 (how the Slurm API expects + it) + + Args: + name (str, optional=None): + Name of a Partition + **kwargs (Any, optional=None): + Every attribute of a Partition can be set, except for: + + * total_cpus + * total_nodes + * select_type_parameters + * consumable_resource + + Attributes: + name (str): + Name of the Partition. + allowed_submit_nodes (list[str]): + List of Nodes from which Jobs can be submitted to the partition. + allowed_accounts (list[str]): + List of Accounts which are allowed to execute Jobs + allowed_groups (list[str]): + List of Groups which are allowed to execute Jobs + allowed_qos (list[str]): + List of QoS which are allowed to execute Jobs + alternate (str): + Name of the alternate Partition in case a Partition is down. + consumable_resource (str): + The type of consumable resource used in the Partition. + select_type_parameters (list[str]): + List of additional parameters passed to the select plugin used. + cpu_binding (str): + Default CPU-binding for Jobs that execute in a Partition. + default_memory_per_cpu (int): + Default Memory per CPU for Jobs in this Partition, in Mebibytes. + Mutually exclusive with `default_memory_per_node`. + + This can also return [UNLIMITED][pyslurm.constants.UNLIMITED] + default_memory_per_node (int): + Default Memory per Node for Jobs in this Partition, in Mebibytes. + Mutually exclusive with `default_memory_per_cpu`. + + This can also return [UNLIMITED][pyslurm.constants.UNLIMITED] + max_memory_per_cpu (int): + Max Memory per CPU allowed for Jobs in this Partition, in + Mebibytes. Mutually exclusive with `max_memory_per_node`. + + This can also return [UNLIMITED][pyslurm.constants.UNLIMITED] + max_memory_per_node (int): + Max Memory per Node allowed for Jobs in this Partition, in + Mebibytes. 
Mutually exclusive with `max_memory_per_cpu` + + This can also return [UNLIMITED][pyslurm.constants.UNLIMITED] + default_time (int): + Default run time-limit in minutes for Jobs that don't specify one. + + This can also return [UNLIMITED][pyslurm.constants.UNLIMITED] + denied_qos (list[str]): + List of QoS that cannot be used in a Partition + denied_accounts (list[str]): + List of Accounts that cannot use a Partition + preemption_grace_time (int): + Grace Time in seconds when a Job is selected for Preemption. + default_cpus_per_gpu (int): + Default CPUs per GPU for Jobs in this Partition + default_memory_per_gpu (int): + Default Memory per GPU, in Mebibytes, for Jobs in this Partition + max_cpus_per_node (int): + Max CPUs per Node allowed for Jobs in this Partition + + This can also return [UNLIMITED][pyslurm.constants.UNLIMITED] + max_cpus_per_socket (int): + Max CPUs per Socket allowed for Jobs in this Partition + + This can also return [UNLIMITED][pyslurm.constants.UNLIMITED] + max_nodes (int): + Max number of Nodes allowed for Jobs + + This can also return [UNLIMITED][pyslurm.constants.UNLIMITED] + min_nodes (int): + Minimum number of Nodes that must be requested by Jobs + max_time_limit (int): + Max Time-Limit in minutes that Jobs can request + + This can also return [UNLIMITED][pyslurm.constants.UNLIMITED] + oversubscribe (str): + The oversubscribe mode for this Partition + nodes (str): + Nodes that are in a Partition + nodesets (list[str]): + List of Nodesets that a Partition has configured + over_time_limit (int): + Limit in minutes that Jobs can exceed their time-limit + + This can also return [UNLIMITED][pyslurm.constants.UNLIMITED] + preempt_mode (str): + Preemption Mode in a Partition + priority_job_factor (int): + The Priority Job Factor for a partition + priority_tier (int): + The priority tier for a Partition + qos (str): + A QoS associated with a Partition, used to extend possible limits + total_cpus (int): + Total number of CPUs available in a 
Partition + total_nodes (int): + Total number of nodes available in a Partition + state (str): + State the Partition is in + is_default (bool): + Whether this Partition is the default partition or not + allow_root_jobs (bool): + Whether Jobs by the root user are allowed + is_user_exclusive (bool): + Whether nodes will be exclusively allocated to users + is_hidden (bool): + Whether the partition is hidden or not + least_loaded_nodes_scheduling (bool): + Whether Least-Loaded-Nodes scheduling algorithm is used on a + Partition + is_root_only (bool): + Whether only root is able to use a Partition + requires_reservation (bool): + Whether a reservation is required to use a Partition + """ + cdef: + partition_info_t *ptr + int power_save_enabled + slurmctld.Config slurm_conf + + @staticmethod + cdef Partition from_ptr(partition_info_t *in_ptr) diff --git a/pyslurm/core/partition.pyx b/pyslurm/core/partition.pyx new file mode 100644 index 00000000..25e17124 --- /dev/null +++ b/pyslurm/core/partition.pyx @@ -0,0 +1,866 @@ +######################################################################### +# partition.pyx - interface to work with partitions in slurm +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# Copyright (C) 2023 PySlurm Developers +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from typing import Union, Any +from pyslurm.utils import cstr +from pyslurm.utils import ctime +from pyslurm.utils.uint import * +from pyslurm.core.error import RPCError, verify_rpc +from pyslurm.utils.ctime import timestamp_to_date, _raw_time +from pyslurm.constants import UNLIMITED +from pyslurm.utils.helpers import ( + uid_to_name, + gid_to_name, + _getgrall_to_dict, + _getpwall_to_dict, + cpubind_to_num, + instance_to_dict, + _sum_prop, + dehumanize, +) +from pyslurm.utils.ctime import ( + timestr_to_mins, + timestr_to_secs, +) + + +cdef class Partitions(dict): + def __dealloc__(self): + slurm_free_partition_info_msg(self.info) + + def __cinit__(self): + self.info = NULL + + def __init__(self, partitions=None): + if isinstance(partitions, dict): + self.update(partitions) + elif isinstance(partitions, str): + partlist = partitions.split(",") + self.update({part: Partition(part) for part in partlist}) + elif partitions is not None: + for part in partitions: + if isinstance(part, str): + self[part] = Partition(part) + else: + self[part.name] = part + + @staticmethod + def load(): + """Load all Partitions in the system. + + Returns: + (pyslurm.Partitions): Collection of Partition objects. + + Raises: + RPCError: When getting all the Partitions from the slurmctld + failed. 
+ """ + cdef: + Partitions partitions = Partitions.__new__(Partitions) + int flags = slurm.SHOW_ALL + Partition partition + slurmctld.Config slurm_conf + int power_save_enabled = 0 + + verify_rpc(slurm_load_partitions(0, &partitions.info, flags)) + slurm_conf = slurmctld.Config.load() + + # zero-out a dummy partition_info_t + memset(&partitions.tmp_info, 0, sizeof(partition_info_t)) + + if slurm_conf.suspend_program and slurm_conf.resume_program: + power_save_enabled = 1 + + # Put each pointer into its own instance. + for cnt in range(partitions.info.record_count): + partition = Partition.from_ptr(&partitions.info.partition_array[cnt]) + + # Prevent double free if xmalloc fails mid-loop and a MemoryError + # is raised by replacing it with a zeroed-out partition_info_t. + partitions.info.partition_array[cnt] = partitions.tmp_info + + partition.power_save_enabled = power_save_enabled + partition.slurm_conf = slurm_conf + partitions[partition.name] = partition + + # At this point we memcpy'd all the memory for the Partitions. Setting + # this to 0 will prevent the slurm partition free function to + # deallocate the memory for the individual partitions. This should be + # fine, because they are free'd automatically in __dealloc__ since the + # lifetime of each partition-pointer is tied to the lifetime of its + # corresponding "Partition" instance. + partitions.info.record_count = 0 + + return partitions + + def reload(self): + """Reload the information for Partitions in a collection. + + !!! note + + Only information for Partitions which are already in the + collection at the time of calling this method will be reloaded. + + Returns: + (pyslurm.Partitions): Returns self + + Raises: + RPCError: When getting the Partitions from the slurmctld failed. + """ + cdef Partitions reloaded_parts + our_parts = list(self.keys()) + + if not our_parts: + return self + + reloaded_parts = Partitions.load() + for part in our_parts: + if part in reloaded_parts: + # Put the new data in. 
+ self[part] = reloaded_parts[part] + + return self + + def set_state(self, state): + """Modify the State of all Partitions in this Collection. + + Args: + state (str): + Partition state to set + + Raises: + RPCError: When updating the state failed + """ + for part in self.values(): + part.modify(state=state) + + def as_list(self): + """Format the information as list of Partition objects. + + Returns: + (list): List of Partition objects + """ + return list(self.values()) + + @property + def total_cpus(self): + return _sum_prop(self, Partition.total_cpus) + + @property + def total_nodes(self): + return _sum_prop(self, Partition.total_nodes) + + +cdef class Partition: + + def __cinit__(self): + self.ptr = NULL + + def __init__(self, name=None, **kwargs): + self._alloc_impl() + self.name = name + for k, v in kwargs.items(): + setattr(self, k, v) + + def _alloc_impl(self): + if not self.ptr: + self.ptr = try_xmalloc(sizeof(partition_info_t)) + if not self.ptr: + raise MemoryError("xmalloc failed for partition_info_t") + + slurm_init_part_desc_msg(self.ptr) + + def _dealloc_impl(self): + slurm_free_partition_info_members(self.ptr) + xfree(self.ptr) + + def __dealloc__(self): + self._dealloc_impl() + + @staticmethod + cdef Partition from_ptr(partition_info_t *in_ptr): + cdef Partition wrap = Partition.__new__(Partition) + wrap._alloc_impl() + memcpy(wrap.ptr, in_ptr, sizeof(partition_info_t)) + return wrap + + def _error_or_name(self): + if not self.name: + raise ValueError("You need to set a Partition name for this " + "instance.") + return self.name + + def as_dict(self): + """Partition information formatted as a dictionary. + + Returns: + (dict): Partition information as dict + + Examples: + >>> import pyslurm + >>> mypart = pyslurm.Partition.load("mypart") + >>> mypart_dict = mypart.as_dict() + """ + return instance_to_dict(self) + + @staticmethod + def load(name): + """Load information for a specific Partition. 
+ + Args: + name (str): + The name of the Partition to load. + + Returns: + (pyslurm.Partition): Returns a new Partition instance. + + Raises: + RPCError: If requesting the Partition information from the + slurmctld was not successful. + + Examples: + >>> import pyslurm + >>> part = pyslurm.Partition.load("normal") + """ + partitions = Partitions.load() + if name not in partitions: + raise RPCError(msg=f"Partition '{name}' doesn't exist") + + return partitions[name] + + def create(self): + """Create a Partition. + + Implements the slurm_create_partition RPC. + + Returns: + (pyslurm.Partition): This function returns the current Partition + instance object itself. + + Raises: + RPCError: If creating the Partition was not successful. + + Examples: + >>> import pyslurm + >>> part = pyslurm.Partition("debug").create() + """ + self._error_or_name() + verify_rpc(slurm_create_partition(self.ptr)) + return self + + def modify(self, **changes): + """Modify a Partition. + + Implements the slurm_update_partition RPC. + + Args: + **changes (Any): + Changes for the Partition. Almost every Attribute from a + Partition can be modified, except for: + + * total_cpus + * total_nodes + * select_type_parameters + * consumable_resource + + Raises: + ValueError: When no changes were specified or when a parsing error + occured. + RPCError: When updating the Partition was not successful. + + Examples: + >>> import pyslurm + >>> + >>> # Modifying the maximum time limit + >>> mypart = pyslurm.Partition("normal") + >>> mypart.modify(max_time_limit="10-00:00:00") + >>> + >>> # Modifying the partition state + >>> mypart.modify(state="DRAIN") + """ + if not changes: + raise ValueError("No changes were specified") + + cdef Partition part = Partition(**changes) + part.name = self._error_or_name() + verify_rpc(slurm_update_partition(part.ptr)) + + def delete(self): + """Delete a Partition. + + Implements the slurm_delete_partition RPC. 
+ + Raises: + RPCError: When deleting the Partition was not successful. + + Examples: + >>> import pyslurm + >>> pyslurm.Partition("normal").delete() + """ + cdef delete_part_msg_t del_part_msg + memset(&del_part_msg, 0, sizeof(del_part_msg)) + + del_part_msg.name = cstr.from_unicode(self._error_or_name()) + verify_rpc(slurm_delete_partition(&del_part_msg)) + + # If using property getter/setter style internally becomes too messy at + # some point, we can easily switch to normal "cdef public" attributes and + # just extract the getter/setter logic into two functions, where one + # creates a pointer from the instance attributes, and the other parses + # pointer values into instance attributes. + # + # From a user perspective nothing would change. + + @property + def name(self): + return cstr.to_unicode(self.ptr.name) + + @name.setter + def name(self, val): + cstr.fmalloc(&self.ptr.name, val) + + @property + def allowed_submit_nodes(self): + return cstr.to_list(self.ptr.allow_alloc_nodes, ["ALL"]) + + @allowed_submit_nodes.setter + def allowed_submit_nodes(self, val): + cstr.from_list(&self.ptr.allow_alloc_nodes, val) + + @property + def allowed_accounts(self): + return cstr.to_list(self.ptr.allow_accounts, ["ALL"]) + + @allowed_accounts.setter + def allowed_accounts(self, val): + cstr.from_list(&self.ptr.allow_accounts, val) + + @property + def allowed_groups(self): + return cstr.to_list(self.ptr.allow_groups, ["ALL"]) + + @allowed_groups.setter + def allowed_groups(self, val): + cstr.from_list(&self.ptr.allow_groups, val) + + @property + def allowed_qos(self): + return cstr.to_list(self.ptr.allow_qos, ["ALL"]) + + @allowed_qos.setter + def allowed_qos(self, val): + cstr.from_list(&self.ptr.allow_qos, val) + + @property + def alternate(self): + return cstr.to_unicode(self.ptr.alternate) + + @alternate.setter + def alternate(self, val): + cstr.fmalloc(&self.ptr.alternate, val) + + @property + def consumable_resource(self): + return 
_select_type_int_to_cons_res(self.ptr.cr_type) + + @property + def select_type_parameters(self): + return _select_type_int_to_list(self.ptr.cr_type) + + @property + def cpu_binding(self): + cdef char cpu_bind[128] + slurm_sprint_cpu_bind_type(cpu_bind, + self.ptr.cpu_bind) + if cpu_bind == "(null type)": + return None + + return cstr.to_unicode(cpu_bind) + + @cpu_binding.setter + def cpu_binding(self, val): + self.ptr.cpu_bind = cpubind_to_num(val) + + @property + def default_memory_per_cpu(self): + return _get_memory(self.ptr.def_mem_per_cpu, per_cpu=True) + + @default_memory_per_cpu.setter + def default_memory_per_cpu(self, val): + self.ptr.def_mem_per_cpu = u64(dehumanize(val)) + self.ptr.def_mem_per_cpu |= slurm.MEM_PER_CPU + + @property + def default_memory_per_node(self): + return _get_memory(self.ptr.def_mem_per_cpu, per_cpu=False) + + @default_memory_per_node.setter + def default_memory_per_node(self, val): + self.ptr.def_mem_per_cpu = u64(dehumanize(val)) + + @property + def max_memory_per_cpu(self): + return _get_memory(self.ptr.max_mem_per_cpu, per_cpu=True) + + @max_memory_per_cpu.setter + def max_memory_per_cpu(self, val): + self.ptr.max_mem_per_cpu = u64(dehumanize(val)) + self.ptr.max_mem_per_cpu |= slurm.MEM_PER_CPU + + @property + def max_memory_per_node(self): + return _get_memory(self.ptr.max_mem_per_cpu, per_cpu=False) + + @max_memory_per_node.setter + def max_memory_per_node(self, val): + self.ptr.max_mem_per_cpu = u64(dehumanize(val)) + + @property + def default_time(self): + return _raw_time(self.ptr.default_time, on_inf=UNLIMITED) + + @default_time.setter + def default_time(self, val): + self.ptr.default_time = timestr_to_mins(val) + + @property + def denied_qos(self): + return cstr.to_list(self.ptr.deny_qos, ["ALL"]) + + @denied_qos.setter + def denied_qos(self, val): + cstr.from_list(&self.ptr.deny_qos, val) + + @property + def denied_accounts(self): + return cstr.to_list(self.ptr.deny_accounts, ["ALL"]) + + @denied_accounts.setter + def 
denied_accounts(self, val): + cstr.from_list(&self.ptr.deny_accounts, val) + + @property + def preemption_grace_time(self): + return _raw_time(self.ptr.grace_time) + + @preemption_grace_time.setter + def preemption_grace_time(self, val): + self.ptr.grace_time = timestr_to_secs(val) + + @property + def default_cpus_per_gpu(self): + def_dict = cstr.to_dict(self.ptr.job_defaults_str) + if def_dict and "DefCpuPerGpu" in def_dict: + return int(def_dict["DefCpuPerGpu"]) + + return _extract_job_default_item(slurm.JOB_DEF_CPU_PER_GPU, + self.ptr.job_defaults_list) + + @default_cpus_per_gpu.setter + def default_cpus_per_gpu(self, val): + _concat_job_default_str("DefCpuPerGpu", val, + &self.ptr.job_defaults_str) + + @property + def default_memory_per_gpu(self): + def_dict = cstr.to_dict(self.ptr.job_defaults_str) + if def_dict and "DefMemPerGpu" in def_dict: + return int(def_dict["DefMemPerGpu"]) + + return _extract_job_default_item(slurm.JOB_DEF_MEM_PER_GPU, + self.ptr.job_defaults_list) + + @default_memory_per_gpu.setter + def default_memory_per_gpu(self, val): + _concat_job_default_str("DefMemPerGpu", val, + &self.ptr.job_defaults_str) + + @property + def max_cpus_per_node(self): + return u32_parse(self.ptr.max_cpus_per_node) + + @max_cpus_per_node.setter + def max_cpus_per_node(self, val): + self.ptr.max_cpus_per_node = u32(val) + + @property + def max_cpus_per_socket(self): + return u32_parse(self.ptr.max_cpus_per_socket) + + @max_cpus_per_socket.setter + def max_cpus_per_socket(self, val): + self.ptr.max_cpus_per_socket = u32(val) + + @property + def max_nodes(self): + return u32_parse(self.ptr.max_nodes) + + @max_nodes.setter + def max_nodes(self, val): + self.ptr.max_nodes = u32(val) + + @property + def min_nodes(self): + return u32_parse(self.ptr.min_nodes, zero_is_noval=False) + + @min_nodes.setter + def min_nodes(self, val): + self.ptr.min_nodes = u32(val, zero_is_noval=False) + + @property + def max_time_limit(self): + return _raw_time(self.ptr.max_time, 
on_inf=UNLIMITED) + + @max_time_limit.setter + def max_time_limit(self, val): + self.ptr.max_time = timestr_to_mins(val) + + @property + def oversubscribe(self): + return _oversubscribe_int_to_str(self.ptr.max_share) + + @oversubscribe.setter + def oversubscribe(self, val): + self.ptr.max_share = _oversubscribe_str_to_int(val) + + @property + def nodes(self): + return cstr.to_unicode(self.ptr.nodes) + + @nodes.setter + def nodes(self, val): + cstr.from_list(&self.ptr.nodes, val) + + @property + def nodesets(self): + return cstr.to_list(self.ptr.nodesets) + + @nodesets.setter + def nodesets(self, val): + cstr.from_list(&self.ptr.nodesets, val) + + @property + def over_time_limit(self): + return u16_parse(self.ptr.over_time_limit) + + @over_time_limit.setter + def over_time_limit(self, val): + self.ptr.over_time_limit = u16(self.ptr.over_time_limit) + + @property + def preempt_mode(self): + return _preempt_mode_int_to_str(self.ptr.preempt_mode, self.slurm_conf) + + @preempt_mode.setter + def preempt_mode(self, val): + self.ptr.preempt_mode = _preempt_mode_str_to_int(val) + + @property + def priority_job_factor(self): + return u16_parse(self.ptr.priority_job_factor) + + @priority_job_factor.setter + def priority_job_factor(self, val): + self.ptr.priority_job_factor = u16(val) + + @property + def priority_tier(self): + return u16_parse(self.ptr.priority_tier) + + @priority_tier.setter + def priority_tier(self, val): + self.ptr.priority_tier = u16(val) + + @property + def qos(self): + return cstr.to_unicode(self.ptr.qos_char) + + @qos.setter + def qos(self, val): + cstr.fmalloc(&self.ptr.qos_char, val) + + @property + def total_cpus(self): + return u32_parse(self.ptr.total_cpus, on_noval=0) + + @property + def total_nodes(self): + return u32_parse(self.ptr.total_nodes, on_noval=0) + + @property + def state(self): + return _partition_state_int_to_str(self.ptr.state_up) + + @state.setter + def state(self, val): + self.ptr.state_up = _partition_state_str_to_int(val) + + 
@property + def is_default(self): + return u16_parse_bool_flag(self.ptr.flags, slurm.PART_FLAG_DEFAULT) + + @is_default.setter + def is_default(self, val): + u16_set_bool_flag(&self.ptr.flags, val, + slurm.PART_FLAG_DEFAULT, slurm.PART_FLAG_DEFAULT_CLR) + + @property + def allow_root_jobs(self): + return u16_parse_bool_flag(self.ptr.flags, slurm.PART_FLAG_NO_ROOT) + + @allow_root_jobs.setter + def allow_root_jobs(self, val): + u16_set_bool_flag(&self.ptr.flags, val, slurm.PART_FLAG_NO_ROOT, + slurm.PART_FLAG_NO_ROOT_CLR) + + @property + def is_user_exclusive(self): + return u16_parse_bool_flag(self.ptr.flags, + slurm.PART_FLAG_EXCLUSIVE_USER) + + @is_user_exclusive.setter + def is_user_exclusive(self, val): + u16_set_bool_flag(&self.ptr.flags, val, slurm.PART_FLAG_EXCLUSIVE_USER, + slurm.PART_FLAG_EXC_USER_CLR) + + @property + def is_hidden(self): + return u16_parse_bool_flag(self.ptr.flags, slurm.PART_FLAG_HIDDEN) + + @is_hidden.setter + def is_hidden(self, val): + u16_set_bool_flag(&self.ptr.flags, val, + slurm.PART_FLAG_HIDDEN, slurm.PART_FLAG_HIDDEN_CLR) + + @property + def least_loaded_nodes_scheduling(self): + return u16_parse_bool_flag(self.ptr.flags, slurm.PART_FLAG_LLN) + + @least_loaded_nodes_scheduling.setter + def least_loaded_nodes_scheduling(self, val): + u16_set_bool_flag(&self.ptr.flags, val, slurm.PART_FLAG_LLN, + slurm.PART_FLAG_LLN_CLR) + + @property + def is_root_only(self): + return u16_parse_bool_flag(self.ptr.flags, slurm.PART_FLAG_ROOT_ONLY) + + @is_root_only.setter + def is_root_only(self, val): + u16_set_bool_flag(&self.ptr.flags, val, slurm.PART_FLAG_ROOT_ONLY, + slurm.PART_FLAG_ROOT_ONLY_CLR) + + @property + def requires_reservation(self): + return u16_parse_bool_flag(self.ptr.flags, slurm.PART_FLAG_REQ_RESV) + + @requires_reservation.setter + def requires_reservation(self, val): + u16_set_bool_flag(&self.ptr.flags, val, slurm.PART_FLAG_REQ_RESV, + slurm.PART_FLAG_REQ_RESV_CLR) + + # TODO: tres_fmt_str + + +def 
_partition_state_int_to_str(state): + if state == slurm.PARTITION_UP: + return "UP" + elif state == slurm.PARTITION_DOWN: + return "DOWN" + elif state == slurm.PARTITION_INACTIVE: + return "INACTIVE" + elif state == slurm.PARTITION_DRAIN: + return "DRAIN" + else: + return "UNKNOWN" + + +def _partition_state_str_to_int(state): + state = state.upper() + + if state == "UP": + return slurm.PARTITION_UP + elif state == "DOWN": + return slurm.PARTITION_DOWN + elif state == "INACTIVE": + return slurm.PARTITION_INACTIVE + elif state == "DRAIN": + return slurm.PARTITION_DRAIN + else: + choices = "UP, DOWN, INACTIVE, DRAIN" + raise ValueError(f"Invalid partition state: {state}, valid choices " + f"are {choices}") + + +def _oversubscribe_int_to_str(shared): + if shared == slurm.NO_VAL16: + return None + + is_forced = shared & slurm.SHARED_FORCE + max_jobs = shared & (~slurm.SHARED_FORCE) + + if not max_jobs: + return "EXCLUSIVE" + elif is_forced: + return f"FORCE:{max_jobs}" + elif max_jobs == 1: + return "NO" + else: + return f"YES:{max_jobs}" + + +def _oversubscribe_str_to_int(typ): + typ = typ.upper() + + if typ == "NO": + return 1 + elif typ == "EXCLUSIVE": + return 0 + elif "YES" in typ: + return _split_oversubscribe_str(typ) + elif "FORCE" in typ: + return _split_oversubscribe_str(typ) | slurm.SHARED_FORCE + else: + return slurm.NO_VAL16 + + +def _split_oversubscribe_str(val): + max_jobs = val.split(":", 1) + if len(max_jobs) == 2: + return int(max_jobs[1]) + else: + return 4 + + +def _select_type_int_to_list(stype): + # The rest of the CR_* stuff are just some extra parameters to the select + # plugin + out = [] + + if stype & slurm.CR_OTHER_CONS_RES: + out.append("OTHER_CONS_RES") + + if stype & slurm.CR_ONE_TASK_PER_CORE: + out.append("ONE_TASK_PER_CORE") + + if stype & slurm.CR_PACK_NODES: + out.append("PACK_NODES") + + if stype & slurm.CR_OTHER_CONS_TRES: + out.append("OTHER_CONS_TRES") + + if stype & slurm.CR_CORE_DEFAULT_DIST_BLOCK: + 
out.append("CORE_DEFAULT_DIST_BLOCK") + + if stype & slurm.CR_LLN: + out.append("LLN") + + return out + + +def _select_type_int_to_cons_res(stype): + # https://github.com/SchedMD/slurm/blob/257ca5e4756a493dc4c793ded3ac3c1a769b3c83/slurm/slurm.h#L996 + # The 3 main select types are mutually exclusive, and may be combined with + # CR_MEMORY + # CR_BOARD exists but doesn't show up in the documentation, so ignore it. + if stype & slurm.CR_CPU and stype & slurm.CR_MEMORY: + return "CPU_MEMORY" + elif stype & slurm.CR_CORE and stype & slurm.CR_MEMORY: + return "CORE_MEMORY" + elif stype & slurm.CR_SOCKET and stype & slurm.CR_MEMORY: + return "SOCKET_MEMORY" + elif stype & slurm.CR_CPU: + return "CPU" + elif stype & slurm.CR_CORE: + return "CORE" + elif stype & slurm.CR_SOCKET: + return "SOCKET" + elif stype & slurm.CR_MEMORY: + return "MEMORY" + else: + return None + + +def _preempt_mode_str_to_int(mode): + if not mode: + return slurm.NO_VAL16 + + pmode = slurm_preempt_mode_num(str(mode)) + if pmode == slurm.NO_VAL16: + raise ValueError(f"Invalid Preempt mode: {mode}") + + return pmode + + +def _preempt_mode_int_to_str(mode, slurmctld.Config slurm_conf): + if mode == slurm.NO_VAL16: + return slurm_conf.preempt_mode if slurm_conf else None + else: + return cstr.to_unicode(slurm_preempt_mode_string(mode)) + + +cdef _extract_job_default_item(typ, slurm.List job_defaults_list): + cdef: + job_defaults_t *default_item + SlurmList job_def_list + SlurmListItem job_def_item + + job_def_list = SlurmList.wrap(job_defaults_list, owned=False) + for job_def_item in job_def_list: + default_item = job_def_item.data + if default_item.type == typ: + return default_item.value + + return None + + +cdef _concat_job_default_str(typ, val, char **job_defaults_str): + cdef uint64_t _val = u64(dehumanize(val)) + + current = cstr.to_dict(job_defaults_str[0]) + if _val == slurm.NO_VAL64: + current.pop(typ, None) + else: + current.update({typ : _val}) + + cstr.from_dict(job_defaults_str, current) + 
+ +def _get_memory(value, per_cpu): + if value != slurm.NO_VAL64: + if value & slurm.MEM_PER_CPU and per_cpu: + if value == slurm.MEM_PER_CPU: + return UNLIMITED + return u64_parse(value & (~slurm.MEM_PER_CPU)) + + # For these values, Slurm interprets 0 as being equal to + # INFINITE/UNLIMITED + elif value == 0 and not per_cpu: + return UNLIMITED + + elif not value & slurm.MEM_PER_CPU and not per_cpu: + return u64_parse(value) + + return None diff --git a/pyslurm/core/slurmctld.pxd b/pyslurm/core/slurmctld.pxd index 0f42fffb..8bafb01f 100644 --- a/pyslurm/core/slurmctld.pxd +++ b/pyslurm/core/slurmctld.pxd @@ -27,6 +27,7 @@ from pyslurm.slurm cimport ( slurm_conf_t, slurm_load_ctl_conf, slurm_free_ctl_conf, + slurm_preempt_mode_string, try_xmalloc, ) from pyslurm.utils cimport cstr diff --git a/pyslurm/core/slurmctld.pyx b/pyslurm/core/slurmctld.pyx index 2b5367c5..7f06966e 100644 --- a/pyslurm/core/slurmctld.pyx +++ b/pyslurm/core/slurmctld.pyx @@ -46,3 +46,17 @@ cdef class Config: @property def cluster(self): return cstr.to_unicode(self.ptr.cluster_name) + + @property + def preempt_mode(self): + cdef char *tmp = slurm_preempt_mode_string(self.ptr.preempt_mode) + return cstr.to_unicode(tmp) + + @property + def suspend_program(self): + return cstr.to_unicode(self.ptr.suspend_program) + + @property + def resume_program(self): + return cstr.to_unicode(self.ptr.resume_program) + diff --git a/pyslurm/slurm/extra.pxi b/pyslurm/slurm/extra.pxi index 0ccb0708..c18db9dc 100644 --- a/pyslurm/slurm/extra.pxi +++ b/pyslurm/slurm/extra.pxi @@ -271,3 +271,10 @@ cdef extern char *slurm_hostlist_deranged_string_malloc(hostlist_t hl) cdef extern void slurmdb_job_cond_def_start_end(slurmdb_job_cond_t *job_cond) cdef extern uint64_t slurmdb_find_tres_count_in_string(char *tres_str_in, int id) + +# +# Slurm Partition functions +# + +cdef extern void slurm_free_update_part_msg(update_part_msg_t *msg) +cdef extern void slurm_free_partition_info_members(partition_info_t *node) diff 
--git a/pyslurm/utils/cstr.pxd b/pyslurm/utils/cstr.pxd index b1719bde..e8014a5f 100644 --- a/pyslurm/utils/cstr.pxd +++ b/pyslurm/utils/cstr.pxd @@ -31,7 +31,7 @@ cdef to_unicode(char *s, default=*) cdef fmalloc(char **old, val) cdef fmalloc2(char **p1, char **p2, val) cdef free_array(char **arr, count) -cpdef list to_list(char *str_list) +cpdef list to_list(char *str_list, default=*) cdef from_list(char **old, vals, delim=*) cdef from_list2(char **p1, char **p2, vals, delim=*) cpdef dict to_dict(char *str_dict, str delim1=*, str delim2=*) diff --git a/pyslurm/utils/cstr.pyx b/pyslurm/utils/cstr.pyx index 12a39ecb..489d80e8 100644 --- a/pyslurm/utils/cstr.pyx +++ b/pyslurm/utils/cstr.pyx @@ -46,7 +46,7 @@ cdef to_unicode(char *_str, default=None): """Convert a char* to Python3 str (unicode)""" if _str and _str[0] != NULL_BYTE: if _str == NONE_BYTE: - return None + return default return _str else: @@ -96,12 +96,12 @@ cdef fmalloc(char **old, val): old[0] = NULL -cpdef list to_list(char *str_list): +cpdef list to_list(char *str_list, default=[]): """Convert C-String to a list.""" cdef str ret = to_unicode(str_list) if not ret: - return [] + return default return ret.split(",") @@ -137,7 +137,7 @@ cpdef dict to_dict(char *str_dict, str delim1=",", str delim2="="): str key, val dict out = {} - if not _str_dict or delim1 not in _str_dict: + if not _str_dict: return out for kv in _str_dict.split(delim1): @@ -187,7 +187,7 @@ def dict_to_str(vals, prepend=None, delim1=",", delim2="="): for k, v in tmp_dict.items(): if ((delim1 in k or delim2 in k) or - delim1 in v or delim2 in v): + delim1 in str(v) or delim2 in str(v)): raise ValueError( f"Key or Value cannot contain either {delim1} or {delim2}. " f"Got Key: {k} and Value: {v}." 
diff --git a/pyslurm/utils/ctime.pyx b/pyslurm/utils/ctime.pyx index 5ffbc424..45d7c8e2 100644 --- a/pyslurm/utils/ctime.pyx +++ b/pyslurm/utils/ctime.pyx @@ -23,6 +23,7 @@ # cython: language_level=3 import datetime +from pyslurm.constants import UNLIMITED def timestr_to_secs(timestr): @@ -41,7 +42,7 @@ def timestr_to_secs(timestr): if timestr is None: return slurm.NO_VAL - elif timestr == "unlimited": + elif timestr == UNLIMITED or timestr.casefold() == "unlimited": return slurm.INFINITE if str(timestr).isdigit(): @@ -72,7 +73,9 @@ def timestr_to_mins(timestr): if timestr is None: return slurm.NO_VAL - elif timestr == "unlimited": + elif str(timestr).isdigit(): + return timestr + elif timestr == UNLIMITED or timestr.casefold() == "unlimited": return slurm.INFINITE tmp = cstr.from_unicode(timestr) @@ -111,7 +114,7 @@ def secs_to_timestr(secs, default=None): else: return tmp else: - return "unlimited" + return UNLIMITED def mins_to_timestr(mins, default=None): @@ -141,7 +144,7 @@ def mins_to_timestr(mins, default=None): else: return tmp else: - return "unlimited" + return UNLIMITED def date_to_timestamp(date, on_nodate=0): @@ -204,10 +207,10 @@ def timestamp_to_date(timestamp): return ret -def _raw_time(time, default=None): - if (time == slurm.NO_VAL or - time == 0 or - time == slurm.INFINITE): - return default - - return time +def _raw_time(time, on_noval=None, on_inf=None): + if time == slurm.NO_VAL or time == 0: + return on_noval + elif time == slurm.INFINITE: + return on_inf + else: + return time diff --git a/pyslurm/utils/helpers.pyx b/pyslurm/utils/helpers.pyx index 3617112e..28604422 100644 --- a/pyslurm/utils/helpers.pyx +++ b/pyslurm/utils/helpers.pyx @@ -28,6 +28,7 @@ from os import getuid, getgid from itertools import chain import re import signal +from pyslurm.constants import UNLIMITED MEMORY_UNITS = { @@ -235,7 +236,7 @@ def humanize(num, decimals=1): Returns: (str): Humanized number with appropriate suffix. 
""" - if num is None or num == "unlimited": + if num is None or num == "unlimited" or num == UNLIMITED: return num num = int(num) diff --git a/pyslurm/utils/uint.pxd b/pyslurm/utils/uint.pxd index 0fd38739..3d8f50e5 100644 --- a/pyslurm/utils/uint.pxd +++ b/pyslurm/utils/uint.pxd @@ -35,9 +35,13 @@ cpdef u32_parse(uint32_t val, on_inf=*, on_noval=*, noval=*, zero_is_noval=*) cpdef u64_parse(uint64_t val, on_inf=*, on_noval=*, noval=*, zero_is_noval=*) cpdef u8_bool(val) cpdef u16_bool(val) +cdef uint_set_bool_flag(flags, boolean, true_flag, false_flag=*) +cdef uint_parse_bool_flag(flags, flag, no_val) +cdef uint_parse_bool(val, no_val) +cdef uint_bool(val, no_val) cdef u8_parse_bool(uint8_t val) cdef u16_parse_bool(uint16_t val) cdef u64_parse_bool_flag(uint64_t flags, flag) -cdef u64_set_bool_flag(uint64_t *flags, boolean, flag_val) +cdef u64_set_bool_flag(uint64_t *flags, boolean, true_flag, false_flag=*) cdef u16_parse_bool_flag(uint16_t flags, flag) -cdef u16_set_bool_flag(uint16_t *flags, boolean, flag_val) +cdef u16_set_bool_flag(uint16_t *flags, boolean, true_flag, false_flag=*) diff --git a/pyslurm/utils/uint.pyx b/pyslurm/utils/uint.pyx index 7418e109..0dae7779 100644 --- a/pyslurm/utils/uint.pyx +++ b/pyslurm/utils/uint.pyx @@ -22,12 +22,14 @@ # cython: c_string_type=unicode, c_string_encoding=default # cython: language_level=3 +from pyslurm.constants import UNLIMITED + cpdef u8(val, inf=False, noval=slurm.NO_VAL8, on_noval=slurm.NO_VAL8, zero_is_noval=True): """Try to convert arbitrary 'val' to uint8_t""" if val is None or (val == 0 and zero_is_noval) or val == noval: return on_noval - elif inf and val == "unlimited": + elif inf and (val == UNLIMITED or val == "unlimited"): return slurm.INFINITE8 else: if isinstance(val, str) and val.isdigit(): @@ -36,7 +38,7 @@ cpdef u8(val, inf=False, noval=slurm.NO_VAL8, on_noval=slurm.NO_VAL8, zero_is_no return val -cpdef u8_parse(uint8_t val, on_inf="unlimited", on_noval=None, noval=slurm.NO_VAL8, 
zero_is_noval=True): +cpdef u8_parse(uint8_t val, on_inf=UNLIMITED, on_noval=None, noval=slurm.NO_VAL8, zero_is_noval=True): """Convert uint8_t to Python int (with a few situational parameters)""" if val == noval or (val == 0 and zero_is_noval): return on_noval @@ -50,7 +52,7 @@ cpdef u16(val, inf=False, noval=slurm.NO_VAL16, on_noval=slurm.NO_VAL16, zero_is """Try to convert arbitrary 'val' to uint16_t""" if val is None or (val == 0 and zero_is_noval) or val == noval: return on_noval - elif inf and val == "unlimited": + elif inf and (val == UNLIMITED or val == "unlimited"): return slurm.INFINITE16 else: if isinstance(val, str) and val.isdigit(): @@ -59,7 +61,7 @@ cpdef u16(val, inf=False, noval=slurm.NO_VAL16, on_noval=slurm.NO_VAL16, zero_is return val -cpdef u16_parse(uint16_t val, on_inf="unlimited", on_noval=None, noval=slurm.NO_VAL16, zero_is_noval=True): +cpdef u16_parse(uint16_t val, on_inf=UNLIMITED, on_noval=None, noval=slurm.NO_VAL16, zero_is_noval=True): """Convert uint16_t to Python int (with a few situational parameters)""" if val == noval or (val == 0 and zero_is_noval): return on_noval @@ -73,7 +75,7 @@ cpdef u32(val, inf=False, noval=slurm.NO_VAL, on_noval=slurm.NO_VAL, zero_is_nov """Try to convert arbitrary 'val' to uint32_t""" if val is None or (val == 0 and zero_is_noval) or val == noval: return on_noval - elif inf and val == "unlimited": + elif inf and (val == UNLIMITED or val == "unlimited"): return slurm.INFINITE else: if isinstance(val, str) and val.isdigit(): @@ -82,7 +84,7 @@ cpdef u32(val, inf=False, noval=slurm.NO_VAL, on_noval=slurm.NO_VAL, zero_is_nov return val -cpdef u32_parse(uint32_t val, on_inf="unlimited", on_noval=None, noval=slurm.NO_VAL, zero_is_noval=True): +cpdef u32_parse(uint32_t val, on_inf=UNLIMITED, on_noval=None, noval=slurm.NO_VAL, zero_is_noval=True): """Convert uint32_t to Python int (with a few situational parameters)""" if val == noval or (val == 0 and zero_is_noval): return on_noval @@ -96,7 +98,7 @@ cpdef 
u64(val, inf=False, noval=slurm.NO_VAL64, on_noval=slurm.NO_VAL64, zero_is """Try to convert arbitrary 'val' to uint64_t""" if val is None or (val == 0 and zero_is_noval) or val == noval: return on_noval - elif inf and val == "unlimited": + elif inf and (val == UNLIMITED or val == "unlimited"): return slurm.INFINITE64 else: if isinstance(val, str) and val.isdigit(): @@ -105,7 +107,7 @@ cpdef u64(val, inf=False, noval=slurm.NO_VAL64, on_noval=slurm.NO_VAL64, zero_is return val -cpdef u64_parse(uint64_t val, on_inf="unlimited", on_noval=None, noval=slurm.NO_VAL64, zero_is_noval=True): +cpdef u64_parse(uint64_t val, on_inf=UNLIMITED, on_noval=None, noval=slurm.NO_VAL64, zero_is_noval=True): """Convert uint64_t to Python int (with a few situational parameters)""" if val == noval or (val == 0 and zero_is_noval): return on_noval @@ -115,67 +117,72 @@ cpdef u64_parse(uint64_t val, on_inf="unlimited", on_noval=None, noval=slurm.NO_ return val -cpdef u8_bool(val): - if val is None: - return slurm.NO_VAL8 - elif val: - return 1 +cdef uint_set_bool_flag(flags, boolean, true_flag, false_flag=0): + if boolean: + if false_flag: + flags &= ~false_flag + flags |= true_flag + elif boolean is not None: + if false_flag: + flags |= false_flag + flags &= ~true_flag + + return flags + + +cdef uint_parse_bool_flag(flags, flag, no_val): + if flags == no_val: + return False + + if flags & flag: + return True else: - return 0 + return False -cpdef u16_bool(val): +cdef uint_parse_bool(val, no_val): + if not val or val == no_val: + return False + + return True + + +cdef uint_bool(val, no_val): if val is None: - return slurm.NO_VAL16 + return no_val elif val: return 1 else: return 0 -cdef u8_parse_bool(uint8_t val): - if not val or val == slurm.NO_VAL8: - return False - - return True +cpdef u8_bool(val): + return uint_bool(val, slurm.NO_VAL8) -cdef u16_parse_bool(uint16_t val): - if not val or val == slurm.NO_VAL16: - return False +cpdef u16_bool(val): + return uint_bool(val, slurm.NO_VAL16) - 
return True +cdef u8_parse_bool(uint8_t val): + return uint_parse_bool(val, slurm.NO_VAL8) -cdef u64_set_bool_flag(uint64_t *flags, boolean, flag_val): - if boolean: - flags[0] |= flag_val - else: - flags[0] &= ~flag_val +cdef u16_parse_bool(uint16_t val): + return uint_parse_bool(val, slurm.NO_VAL16) -cdef u64_parse_bool_flag(uint64_t flags, flag): - if flags == slurm.NO_VAL: - return False - if flags & flag: - return True - else: - return False +cdef u16_set_bool_flag(uint16_t *flags, boolean, true_flag, false_flag=0): + flags[0] = uint_set_bool_flag(flags[0], boolean, true_flag, false_flag) -cdef u16_set_bool_flag(uint16_t *flags, boolean, flag_val): - if boolean: - flags[0] |= flag_val - else: - flags[0] &= ~flag_val +cdef u64_set_bool_flag(uint64_t *flags, boolean, true_flag, false_flag=0): + flags[0] = uint_set_bool_flag(flags[0], boolean, true_flag, false_flag) cdef u16_parse_bool_flag(uint16_t flags, flag): - if flags == slurm.NO_VAL16: - return False + return uint_parse_bool_flag(flags, flag, slurm.NO_VAL16) - if flags & flag: - return True - else: - return False + +cdef u64_parse_bool_flag(uint64_t flags, flag): + return uint_parse_bool_flag(flags, flag, slurm.NO_VAL64) diff --git a/tests/integration/test_partition.py b/tests/integration/test_partition.py new file mode 100644 index 00000000..bc5a28e2 --- /dev/null +++ b/tests/integration/test_partition.py @@ -0,0 +1,89 @@ +######################################################################### +# test_partition.py - partition api integration tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# Copyright (C) 2023 PySlurm Developers +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+ +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +"""test_partition.py - Test the Partition api functions.""" + +import pytest +import pyslurm +import util +from pyslurm import Partition, Partitions, RPCError + + +def test_load(): + part = Partitions.load().as_list()[0] + + assert part.name + assert part.state + + with pytest.raises(RPCError, + match=f"Partition 'nonexistent' doesn't exist"): + Partition.load("nonexistent") + + +def test_create_delete(): + part = Partition( + name="testpart", + default_time="20-00:00:00", + default_memory_per_cpu=1024, + ) + part.create() + part.delete() + + +def test_modify(): + part = Partitions.load().as_list()[0] + + part.modify(default_time=120) + assert Partition.load(part.name).default_time == 120 + + part.modify(default_time="1-00:00:00") + assert Partition.load(part.name).default_time == 24*60 + + part.modify(default_time="UNLIMITED") + assert Partition.load(part.name).default_time == "UNLIMITED" + + part.modify(state="DRAIN") + assert Partition.load(part.name).state == "DRAIN" + + part.modify(state="UP") + assert Partition.load(part.name).state == "UP" + + +def test_parse_all(): + Partitions.load().as_list()[0].as_dict() + + +def test_reload(): + _partnames = [util.randstr() for i in range(3)] + _tmp_parts = Partitions(_partnames) + for part in _tmp_parts.values(): + part.create() + + all_parts = Partitions.load() + assert len(all_parts) >= 3 + + my_parts = Partitions(_partnames[1:]).reload() + assert len(my_parts) == 2 + for part in my_parts.as_list(): + assert part.state != "UNKNOWN" + + for part in _tmp_parts.values(): + 
part.delete() diff --git a/tests/integration/util.py b/tests/integration/util.py index f5032f1a..05576052 100644 --- a/tests/integration/util.py +++ b/tests/integration/util.py @@ -25,6 +25,7 @@ JobSubmitDescription, ) import time +import random, string # Horrendous, but works for now, because when testing against a real slurmctld # we need to wait a bit for state changes (i.e. we cancel a job and @@ -37,6 +38,11 @@ def wait(secs=WAIT_SECS_SLURMCTLD): time.sleep(secs) +def randstr(strlen=10): + chars = string.ascii_lowercase + return ''.join(random.choice(chars) for n in range(strlen)) + + def create_job_script(): job_script = """\ #!/bin/bash diff --git a/tests/unit/test_common.py b/tests/unit/test_common.py index dd812665..47832436 100644 --- a/tests/unit/test_common.py +++ b/tests/unit/test_common.py @@ -23,7 +23,7 @@ import pyslurm import pytest from datetime import datetime -from pyslurm import Job, JobSubmitDescription, Node +from pyslurm import Job, JobSubmitDescription, Node, Partition from pyslurm.utils.ctime import ( timestr_to_mins, timestr_to_secs, @@ -162,8 +162,8 @@ def _uint_impl(self, func_set, func_get, typ): val = func_set(str(2**typ-2)) assert func_get(val) == None - val = func_set("unlimited", inf=True) - assert func_get(val) == "unlimited" + val = func_set("UNLIMITED", inf=True) + assert func_get(val) == "UNLIMITED" val = func_set(0) assert func_get(val) == None @@ -173,7 +173,7 @@ def _uint_impl(self, func_set, func_get, typ): with pytest.raises(TypeError, match="an integer is required"): - val = func_set("unlimited") + val = func_set("UNLIMITED") with pytest.raises(OverflowError, match=r"can't convert negative value to*"): @@ -196,6 +196,28 @@ def test_u32(self): def test_u64(self): self._uint_impl(u64, u64_parse, 64) + def test_set_parse_bool_flag(self): + part = pyslurm.Partition() + + assert not part.is_hidden + + part.is_hidden = True + assert part.is_hidden + + part.is_root_only = True + assert part.is_hidden + assert part.is_root_only + 
assert not part.is_default + assert not part.allow_root_jobs + + part.is_default = False + part.is_hidden = False + assert not part.is_hidden + assert part.is_root_only + assert not part.is_default + assert not part.allow_root_jobs + + # def _uint_bool_impl(self, arg): # js = JobSubmitDescription() @@ -229,11 +251,11 @@ def test_parse_minutes(self): mins_str = "01:00:00" assert timestr_to_mins(mins_str) == mins - assert timestr_to_mins("unlimited") == 2**32-1 + assert timestr_to_mins("UNLIMITED") == 2**32-1 assert timestr_to_mins(None) == 2**32-2 assert mins_to_timestr(mins) == mins_str - assert mins_to_timestr(2**32-1) == "unlimited" + assert mins_to_timestr(2**32-1) == "UNLIMITED" assert mins_to_timestr(2**32-2) == None assert mins_to_timestr(0) == None @@ -246,11 +268,11 @@ def test_parse_seconds(self): secs_str = "01:00:00" assert timestr_to_secs(secs_str) == secs - assert timestr_to_secs("unlimited") == 2**32-1 + assert timestr_to_secs("UNLIMITED") == 2**32-1 assert timestr_to_secs(None) == 2**32-2 assert secs_to_timestr(secs) == secs_str - assert secs_to_timestr(2**32-1) == "unlimited" + assert secs_to_timestr(2**32-1) == "UNLIMITED" assert secs_to_timestr(2**32-2) == None assert secs_to_timestr(0) == None @@ -327,8 +349,8 @@ def test_humanize(self): val = humanize(800) assert val == "800.0M" - val = humanize("unlimited") - assert val == "unlimited" + val = humanize("UNLIMITED") + assert val == "UNLIMITED" val = humanize(None) assert val == None diff --git a/tests/unit/test_partition.py b/tests/unit/test_partition.py new file mode 100644 index 00000000..141a6e51 --- /dev/null +++ b/tests/unit/test_partition.py @@ -0,0 +1,98 @@ +######################################################################### +# test_partition.py - partition unit tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# Copyright (C) 2023 PySlurm Developers +# +# This file is part of PySlurm +# +# PySlurm is free 
software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +"""test_partition.py - Unit Test basic functionality of the Partition class.""" + +import pytest +import pyslurm +from pyslurm import Partition, Partitions + + +def test_create_instance(): + part = Partition("normal") + assert part.name == "normal" + + +def test_create_collection(): + parts = Partitions("part1,part2") + assert len(parts) == 2 + assert "part1" in parts + assert "part2" in parts + assert parts["part1"].name == "part1" + assert parts["part2"].name == "part2" + + parts = Partitions(["part1", "part2"]) + assert len(parts) == 2 + assert "part1" in parts + assert "part2" in parts + assert parts["part1"].name == "part1" + assert parts["part2"].name == "part2" + + parts = Partitions( + { + "part1": Partition("part1"), + "part2": Partition("part2"), + } + ) + assert len(parts) == 2 + assert "part1" in parts + assert "part2" in parts + assert parts["part1"].name == "part1" + assert parts["part2"].name == "part2" + + +def test_parse_all(): + Partition("normal").as_dict() + + +def test_parse_memory(): + part = Partition() + + assert part.default_memory_per_cpu is None + assert part.default_memory_per_node is None + + part.default_memory_per_cpu = "2G" + assert part.default_memory_per_cpu == 2048 + assert part.default_memory_per_node is None + + part.default_memory_per_node = "2G" + assert 
part.default_memory_per_cpu is None + assert part.default_memory_per_node == 2048 + + +def test_parse_job_defaults(): + part = Partition() + + assert part.default_cpus_per_gpu is None + assert part.default_memory_per_gpu is None + + part.default_cpus_per_gpu = 10 + assert part.default_cpus_per_gpu == 10 + assert part.default_memory_per_gpu is None + + part.default_memory_per_gpu = "10G" + assert part.default_cpus_per_gpu == 10 + assert part.default_memory_per_gpu == 10240 + + part.default_cpus_per_gpu = None + part.default_memory_per_gpu = None + assert part.default_cpus_per_gpu is None + assert part.default_memory_per_gpu is None From a84d23c9d2b2ee18a334ada89dee8f0a9c4a6d57 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Sat, 27 May 2023 23:59:26 +0200 Subject: [PATCH 25/48] Allow db job modification and some fixes (#297) ## Added - Ability to modify Database Jobs - New attributes for a Database Job: - extra - failed_node - Now possible to initialize a pyslurm.db.Jobs collection with existing job ids or pyslurm.db.Job objects ## Fixes - Fixes a problem that prevented loading specific Jobs from the Database if the following two conditions were met: - no start/end time was specified - the Job was older than a day ## Changes - create a _get_exit_code helper function to reduce duplicate code in various places when checking for exit-code or exit-signal of a Job or JobStep --- CHANGELOG.md | 21 +++ pyslurm/core/job/job.pyx | 29 ++-- pyslurm/db/job.pxd | 19 ++- pyslurm/db/job.pyx | 227 +++++++++++++++++++++++++++---- pyslurm/db/step.pyx | 10 +- pyslurm/slurm/extra.pxi | 3 +- pyslurm/utils/helpers.pyx | 14 ++ tests/integration/test_db_job.py | 26 +++- tests/unit/test_db_job.py | 28 +++- 9 files changed, 322 insertions(+), 55 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 18aa6787..d7f10341 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,27 @@ All notable changes to this project will be documented in this file. 
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## Unreleased on the [23.2.x](https://github.com/PySlurm/pyslurm/tree/23.2.x) branch + +### Added + +- Ability to modify Database Jobs +- New classes to interact with the Partition API + - [pyslurm.Partition](https://pyslurm.github.io/23.2/reference/partition/#pyslurm.Partition) + - [pyslurm.Partitions](https://pyslurm.github.io/23.2/reference/partition/#pyslurm.Partitions) +- New attributes for a Database Job: + - extra + - failed_node +- Now possible to initialize a pyslurm.db.Jobs collection with existing job + ids or pyslurm.db.Job objects + +### Fixed + +- Fixes a problem that prevented loading specific Jobs from the Database if + the following two conditions were met: + - no start/end time was specified + - the Job was older than a day + ## [23.2.1](https://github.com/PySlurm/pyslurm/releases/tag/v23.2.1) - 2023-05-18 ### Added diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index bab6ca28..a1811582 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -48,6 +48,7 @@ from pyslurm.utils.helpers import ( _getpwall_to_dict, instance_to_dict, _sum_prop, + _get_exit_code, ) @@ -785,35 +786,23 @@ cdef class Job: @property def derived_exit_code(self): - if (self.ptr.derived_ec == slurm.NO_VAL - or not WIFEXITED(self.ptr.derived_ec)): - return None - - return WEXITSTATUS(self.ptr.derived_ec) + ec, _ = _get_exit_code(self.ptr.derived_ec) + return ec @property def derived_exit_code_signal(self): - if (self.ptr.derived_ec == slurm.NO_VAL - or not WIFSIGNALED(self.ptr.derived_ec)): - return None - - return WTERMSIG(self.ptr.derived_ec) + _, sig = _get_exit_code(self.ptr.derived_ec) + return sig @property def exit_code(self): - if (self.ptr.exit_code == slurm.NO_VAL - or not WIFEXITED(self.ptr.exit_code)): - return None - - return WEXITSTATUS(self.ptr.exit_code) + ec, _ 
= _get_exit_code(self.ptr.exit_code) + return ec @property def exit_code_signal(self): - if (self.ptr.exit_code == slurm.NO_VAL - or not WIFSIGNALED(self.ptr.exit_code)): - return None - - return WTERMSIG(self.ptr.exit_code) + _, sig = _get_exit_code(self.ptr.exit_code) + return sig @property def batch_constraints(self): diff --git a/pyslurm/db/job.pxd b/pyslurm/db/job.pxd index b592c0f6..e333c062 100644 --- a/pyslurm/db/job.pxd +++ b/pyslurm/db/job.pxd @@ -39,6 +39,8 @@ from pyslurm.slurm cimport ( slurmdb_job_cond_def_start_end, slurm_job_state_string, slurm_job_reason_string, + slurmdb_create_job_rec, + slurmdb_job_modify, ) from pyslurm.db.util cimport ( SlurmList, @@ -162,8 +164,17 @@ cdef class Job: job_id (int, optional=0): An Integer representing a Job-ID. - Raises: - MemoryError: If malloc fails to allocate memory. + Other Parameters: + admin_comment (str): + Admin comment for the Job. + comment (str): + Comment for the Job + wckey (str): + Name of the WCKey for this Job + derived_exit_code (int): + Highest exit code of all the Job steps + extra (str): + Arbitrary string that can be stored with a Job. Attributes: steps (pyslurm.db.JobSteps): @@ -209,10 +220,14 @@ cdef class Job: When the Job became eligible to run, as a unix timestamp end_time (int): When the Job ended, as a unix timestamp + extra (str): + Arbitrary string that can be stored with a Job. exit_code (int): Exit code of the job script or salloc. exit_code_signal (int): Signal of the exit code for this Job. + failed_node (str): + Name of the failed node that caused the job to get killed. 
group_id (int): ID of the group for this Job group_name (str): diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index 45eee895..0e12ca37 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -22,9 +22,8 @@ # cython: c_string_type=unicode, c_string_encoding=default # cython: language_level=3 -from os import WIFSIGNALED, WIFEXITED, WTERMSIG, WEXITSTATUS -from typing import Union -from pyslurm.core.error import RPCError +from typing import Union, Any +from pyslurm.core.error import RPCError, PyslurmError from pyslurm.core import slurmctld from typing import Any from pyslurm.utils.uint import * @@ -40,6 +39,7 @@ from pyslurm.utils.helpers import ( uid_to_name, nodelist_to_range_str, instance_to_dict, + _get_exit_code, ) @@ -134,7 +134,6 @@ cdef class JobSearchFilter: ptr.usage_start = date_to_timestamp(self.start_time) ptr.usage_end = date_to_timestamp(self.end_time) - slurmdb_job_cond_def_start_end(ptr) ptr.cpus_min = u32(self.cpus, on_noval=0) ptr.cpus_max = u32(self.max_cpus, on_noval=0) ptr.nodes_min = u32(self.nodes, on_noval=0) @@ -190,12 +189,25 @@ cdef class JobSearchFilter: slurm_list_append(ptr.step_list, selected_step) already_added.append(job_id) + # This must be at the end because it makes decisions based on some + # conditions that might be set. + slurmdb_job_cond_def_start_end(ptr) + cdef class Jobs(dict): - def __init__(self): - # TODO: ability to initialize with existing job objects - pass + def __init__(self, jobs=None): + if isinstance(jobs, dict): + self.update(jobs) + elif isinstance(jobs, str): + joblist = jobs.split(",") + self.update({int(job): Job(job) for job in joblist}) + elif jobs is not None: + for job in jobs: + if isinstance(job, int): + self[job] = Job(job) + else: + self[job.name] = job @staticmethod def load(search_filter=None): @@ -208,6 +220,9 @@ cdef class Jobs(dict): A search filter that the slurmdbd will apply when retrieving Jobs from the database. + Returns: + (pyslurm.db.Jobs): A Collection of database Jobs. 
+ Raises: RPCError: When getting the Jobs from the Database was not sucessful @@ -266,15 +281,134 @@ cdef class Jobs(dict): return jobs + @staticmethod + def modify(search_filter, db_connection=None, **changes): + """Modify Slurm database Jobs. + + Implements the slurm_job_modify RPC. + + Args: + search_filter (Union[pyslurm.db.JobSearchFilter, pyslurm.db.Jobs]): + A filter to decide which Jobs should be modified. + db_connection (pyslurm.db.Connection): + A Connection to the slurmdbd. By default, if no connection is + supplied, one will automatically be created internally. This + means that when the changes were considered successful by the + slurmdbd, those modifications will be **automatically + committed**. + + If you however decide to provide your own Connection instance + (which must be already opened before), and the changes were + successful, they will basically be in a kind of "staging + area". By the time this function returns, the changes are not + actually made. + You are then responsible to decide whether the changes should + be committed or rolled back by using the respective methods on + the connection object. This way, you have a chance to see + which Jobs were modified before you commit the changes. + **changes (Any): + Check the `Other Parameters` Section of [pyslurm.db.Job][] to + see what attributes can be modified. + + Returns: + (list[int]): A list of Jobs that were modified + + Raises: + ValueError: When a parsing error occured or the Database + connection is not open + RPCError: When a failure modifying the Jobs occurred. + + Examples: + In its simplest form, you can do something like this: + + >>> import pyslurm + >>> search_filter = pyslurm.db.JobSearchFilter(ids=[9999]) + >>> modified_jobs = pyslurm.db.Jobs.modify( + ... search_filter, comment="A comment for the job") + >>> print(modified_jobs) + >>> [9999] + + In the above example, the changes will be automatically committed + if successful. 
+ You can however also control this manually by providing your own + connection object: + + >>> import pyslurm + >>> search_filter = pyslurm.db.JobSearchFilter(ids=[9999]) + >>> db_conn = pyslurm.db.Connection.open() + >>> modified_jobs = pyslurm.db.Jobs.modify( + ... search_filter, db_conn, + ... comment="A comment for the job") + >>> # Now you can first examine which Jobs have been modified + >>> print(modified_jobs) + >>> [9999] + >>> # And then you can actually commit (or even rollback) the + >>> # changes + >>> db_conn.commit() + """ + + cdef: + Job job = Job(**changes) + JobSearchFilter jfilter + Connection conn = db_connection + SlurmList response + SlurmListItem response_ptr + list out = [] + + conn = Connection.open() if not conn else conn + if not conn.is_open: + raise ValueError("Database connection is not open") + + if isinstance(search_filter, Jobs): + job_ids = list(search_filter.keys()) + jfilter = JobSearchFilter(ids=job_ids) + else: + jfilter = search_filter + + jfilter._create() + response = SlurmList.wrap( + slurmdb_job_modify(conn.ptr, jfilter.ptr, job.ptr)) + + if not response.is_null and response.cnt: + for response_ptr in response: + response_str = cstr.to_unicode(response_ptr.data) + if not response_str: + continue + + # The strings in the list returned above have a structure + # like this: + # + # " submitted at " + # + # We are just interest in the Job-ID, so extract it + job_id = response_str.split(" ")[0] + if job_id and job_id.isdigit(): + out.append(int(job_id)) + + elif not response.is_null: + # There was no real error, but simply nothing has been modified + raise RPCError(msg="Nothing was modified") + else: + # Autodetects the last slurm error + raise RPCError() + + if not db_connection: + # Autocommit if no connection was explicitly specified. 
+ conn.commit() + + return out + cdef class Job: def __cinit__(self): self.ptr = NULL - def __init__(self, job_id=0): + def __init__(self, job_id=0, **kwargs): self._alloc_impl() self.ptr.jobid = int(job_id) + for k, v in kwargs.items(): + setattr(self, k, v) def __dealloc__(self): self._dealloc_impl() @@ -285,10 +419,7 @@ cdef class Job: def _alloc_impl(self): if not self.ptr: - self.ptr = try_xmalloc( - sizeof(slurmdb_job_rec_t)) - if not self.ptr: - raise MemoryError("xmalloc failed for slurmdb_job_rec_t") + self.ptr = slurmdb_create_job_rec() @staticmethod cdef Job from_ptr(slurmdb_job_rec_t *in_ptr): @@ -307,7 +438,7 @@ cdef class Job: ID of the Job to be loaded. Returns: - (pyslurm.Job): Returns a new Database Job instance + (pyslurm.db.Job): Returns a new Database Job instance Raises: RPCError: If requesting the information for the database Job was @@ -368,6 +499,24 @@ cdef class Job: return out + def modify(self, db_connection=None, **changes): + """Modify a Slurm database Job. + + Args: + db_connection (pyslurm.db.Connection): + A slurmdbd connection. See + [pyslurm.db.Jobs.modify][pyslurm.db.job.Jobs.modify] for more + info + **changes (Any): + Check the `Other Parameters` Section of this class to see what + attributes can be modified. + + Raises: + RPCError: When modifying the Job failed. 
+ """ + cdef JobSearchFilter jfilter = JobSearchFilter(ids=[self.id]) + Jobs.modify(jfilter, db_connection, **changes) + @property def account(self): return cstr.to_unicode(self.ptr.account) @@ -376,6 +525,10 @@ cdef class Job: def admin_comment(self): return cstr.to_unicode(self.ptr.admin_comment) + @admin_comment.setter + def admin_comment(self, val): + cstr.fmalloc(&self.ptr.admin_comment, val) + @property def num_nodes(self): val = TrackableResources.find_count_in_str(self.ptr.tres_alloc_str, @@ -441,24 +594,26 @@ cdef class Job: @property def derived_exit_code(self): - if (self.ptr.derived_ec == slurm.NO_VAL - or not WIFEXITED(self.ptr.derived_ec)): - return None + ec, _ = _get_exit_code(self.ptr.derived_ec) + return ec - return WEXITSTATUS(self.ptr.derived_ec) + @derived_exit_code.setter + def derived_exit_code(self, val): + self.ptr.derived_ec = int(val) @property def derived_exit_code_signal(self): - if (self.ptr.derived_ec == slurm.NO_VAL - or not WIFSIGNALED(self.ptr.derived_ec)): - return None - - return WTERMSIG(self.ptr.derived_ec) + _, sig = _get_exit_code(self.ptr.derived_ec) + return sig @property def comment(self): return cstr.to_unicode(self.ptr.derived_es) + @comment.setter + def comment(self, val): + cstr.fmalloc(&self.ptr.derived_es, val) + @property def elapsed_time(self): return _raw_time(self.ptr.elapsed) @@ -471,18 +626,30 @@ cdef class Job: def end_time(self): return _raw_time(self.ptr.end) + @property + def extra(self): + return cstr.to_unicode(self.ptr.extra) + + @extra.setter + def extra(self, val): + cstr.fmalloc(&self.ptr.extra, val) + @property def exit_code(self): - # TODO - return 0 + ec, _ = _get_exit_code(self.ptr.exitcode) + return ec @property def exit_code_signal(self): - # TODO - return 0 + _, sig = _get_exit_code(self.ptr.exitcode) + return sig # uint32_t flags + @property + def failed_node(self): + return cstr.to_unicode(self.ptr.failed_node) + def group_id(self): return u32_parse(self.ptr.gid, zero_is_noval=False) @@ 
-593,6 +760,10 @@ cdef class Job: def system_comment(self): return cstr.to_unicode(self.ptr.system_comment) + @system_comment.setter + def system_comment(self, val): + cstr.fmalloc(&self.ptr.system_comment, val) + @property def time_limit(self): # TODO: Perhaps we should just find out what the actual PartitionLimit @@ -615,6 +786,10 @@ cdef class Job: def wckey(self): return cstr.to_unicode(self.ptr.wckey) + @wckey.setter + def wckey(self, val): + cstr.fmalloc(&self.ptr.wckey, val) + # @property # def wckey_id(self): # return u32_parse(self.ptr.wckeyid) diff --git a/pyslurm/db/step.pyx b/pyslurm/db/step.pyx index 6e33c8d1..22a46fa8 100644 --- a/pyslurm/db/step.pyx +++ b/pyslurm/db/step.pyx @@ -31,6 +31,7 @@ from pyslurm.utils.helpers import ( gid_to_name, uid_to_name, instance_to_dict, + _get_exit_code, ) from pyslurm.core.job.util import cpu_freq_int_to_str from pyslurm.core.job.step import humanize_step_id @@ -120,8 +121,13 @@ cdef class JobStep: @property def exit_code(self): - # TODO - return None + ec, _ = _get_exit_code(self.ptr.exitcode) + return ec + + @property + def exit_code_signal(self): + _, sig = _get_exit_code(self.ptr.exitcode) + return sig @property def ntasks(self): diff --git a/pyslurm/slurm/extra.pxi b/pyslurm/slurm/extra.pxi index c18db9dc..fb922ac5 100644 --- a/pyslurm/slurm/extra.pxi +++ b/pyslurm/slurm/extra.pxi @@ -266,11 +266,12 @@ cdef extern from *: cdef extern char *slurm_hostlist_deranged_string_malloc(hostlist_t hl) # -# Slurmdbd functions +# slurmdb functions # cdef extern void slurmdb_job_cond_def_start_end(slurmdb_job_cond_t *job_cond) cdef extern uint64_t slurmdb_find_tres_count_in_string(char *tres_str_in, int id) +cdef extern slurmdb_job_rec_t *slurmdb_create_job_rec() # # Slurm Partition functions diff --git a/pyslurm/utils/helpers.pyx b/pyslurm/utils/helpers.pyx index 28604422..cbb0ad5d 100644 --- a/pyslurm/utils/helpers.pyx +++ b/pyslurm/utils/helpers.pyx @@ -22,6 +22,7 @@ # cython: c_string_type=unicode, 
c_string_encoding=default # cython: language_level=3 +from os import WIFSIGNALED, WIFEXITED, WTERMSIG, WEXITSTATUS from grp import getgrgid, getgrnam, getgrall from pwd import getpwuid, getpwnam, getpwall from os import getuid, getgid @@ -348,3 +349,16 @@ def _sum_prop(obj, name, startval=0): val += v return val + + +def _get_exit_code(exit_code): + exit_state=sig = 0 + if exit_code != slurm.NO_VAL: + if WIFSIGNALED(exit_code): + exit_state, sig = 0, WTERMSIG(exit_code) + elif WIFEXITED(exit_code): + exit_state, sig = WEXITSTATUS(exit_code), 0 + if exit_state >= 128: + exit_state -= 128 + + return exit_state, sig diff --git a/tests/integration/test_db_job.py b/tests/integration/test_db_job.py index 2c84ef4f..c344d192 100644 --- a/tests/integration/test_db_job.py +++ b/tests/integration/test_db_job.py @@ -56,8 +56,30 @@ def test_parse_all(submit_job): def test_modify(submit_job): - # TODO - pass + job = submit_job() + util.wait(5) + + jfilter = pyslurm.db.JobSearchFilter(ids=[job.id]) + pyslurm.db.Jobs.modify(jfilter, comment="test comment") + + job = pyslurm.db.Job.load(job.id) + assert job.comment == "test comment" + + +def test_modify_with_existing_conn(submit_job): + job = submit_job() + util.wait(5) + + conn = pyslurm.db.Connection.open() + jfilter = pyslurm.db.JobSearchFilter(ids=[job.id]) + pyslurm.db.Jobs.modify(jfilter, conn, comment="test comment") + + job = pyslurm.db.Job.load(job.id) + assert job.comment != "test comment" + + conn.commit() + job = pyslurm.db.Job.load(job.id) + assert job.comment == "test comment" def test_if_steps_exist(submit_job): diff --git a/tests/unit/test_db_job.py b/tests/unit/test_db_job.py index 43ea5227..c157a650 100644 --- a/tests/unit/test_db_job.py +++ b/tests/unit/test_db_job.py @@ -42,8 +42,32 @@ def test_search_filter(): job_filter._create() -def test_collection_init(): - # TODO +def test_create_collection(): + jobs = pyslurm.db.Jobs("101,102") + assert len(jobs) == 2 + assert 101 in jobs + assert 102 in jobs + assert 
jobs[101].id == 101 + assert jobs[102].id == 102 + + jobs = pyslurm.db.Jobs([101, 102]) + assert len(jobs) == 2 + assert 101 in jobs + assert 102 in jobs + assert jobs[101].id == 101 + assert jobs[102].id == 102 + + jobs = pyslurm.db.Jobs( + { + 101: pyslurm.db.Job(101), + 102: pyslurm.db.Job(102), + } + ) + assert len(jobs) == 2 + assert 101 in jobs + assert 102 in jobs + assert jobs[101].id == 101 + assert jobs[102].id == 102 assert True From 37f7e2262f0797732533fbf934419e9dc04f6db0 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Fri, 2 Jun 2023 21:55:51 +0200 Subject: [PATCH 26/48] Few fixes and handle modification consistently (#300) - handle modification of objects via `modify()` consistently - use correct list of node states when modifying the node state - remove `consumable_resource` attribute of Partition class again, logic is merged into the `select_type_parameters` attribute --- docs/reference/index.md | 3 + pyslurm/core/job/job.pyx | 4 +- pyslurm/core/job/step.pyx | 8 ++- pyslurm/core/node.pxd | 3 + pyslurm/core/node.pyx | 90 ++++++++++++++++++++++++----- pyslurm/core/partition.pxd | 5 +- pyslurm/core/partition.pyx | 63 ++++++++++---------- pyslurm/db/job.pyx | 41 +++++++------ tests/integration/test_db_job.py | 6 +- tests/integration/test_partition.py | 10 ++-- tests/unit/test_node.py | 34 ++++++++++- 11 files changed, 182 insertions(+), 85 deletions(-) diff --git a/docs/reference/index.md b/docs/reference/index.md index e49352fd..5f66d339 100644 --- a/docs/reference/index.md +++ b/docs/reference/index.md @@ -41,6 +41,9 @@ The `pyslurm` package is a wrapper around the Slurm C-API * Node API * [pyslurm.Node][] * [pyslurm.Nodes][] +* Partition API + * [pyslurm.Partition][] + * [pyslurm.Partitions][] * New Exceptions * [pyslurm.RPCError][] * [pyslurm.PyslurmError][] diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index a1811582..521a42a9 100644 --- a/pyslurm/core/job/job.pyx +++ 
b/pyslurm/core/job/job.pyx @@ -92,7 +92,6 @@ cdef class Jobs(dict): Raises: RPCError: When getting all the Jobs from the slurmctld failed. - MemoryError: If malloc fails to allocate memory. """ cdef: dict passwd = {} @@ -188,7 +187,7 @@ cdef class Jobs(dict): """Format the information as list of Job objects. Returns: - (list): List of Job objects + (list[pyslurm.Job]): List of Job objects """ return list(self.values()) @@ -258,7 +257,6 @@ cdef class Job: Raises: RPCError: If requesting the Job information from the slurmctld was not successful. - MemoryError: If malloc failed to allocate memory. Examples: >>> import pyslurm diff --git a/pyslurm/core/job/step.pyx b/pyslurm/core/job/step.pyx index 4b05aa5b..f6b60d9c 100644 --- a/pyslurm/core/job/step.pyx +++ b/pyslurm/core/job/step.pyx @@ -279,15 +279,17 @@ cdef class JobStep: step_id = self.ptr.step_id.step_id verify_rpc(slurm_kill_job_step(self.job_id, step_id, 9)) - def modify(self, changes): + def modify(self, JobStep changes): """Modify a job step. Implements the slurm_update_step RPC. Args: changes (pyslurm.JobStep): - Another JobStep object which contains all the changes that - should be applied to this instance. + Another JobStep object that contains all the changes to apply. + Check the `Other Parameters` of the JobStep class to see which + properties can be modified. + Raises: RPCError: When updating the JobStep was not successful. 
diff --git a/pyslurm/core/node.pxd b/pyslurm/core/node.pxd index 9ddb7000..19684612 100644 --- a/pyslurm/core/node.pxd +++ b/pyslurm/core/node.pxd @@ -121,6 +121,9 @@ cdef class Node: Default CPU-Binding for the node state (str): State of the node + reason (str): + Reason for the Node, typically used along with updating the node + state Attributes: name (str): diff --git a/pyslurm/core/node.pyx b/pyslurm/core/node.pyx index f869b2ab..9c1ecf30 100644 --- a/pyslurm/core/node.pyx +++ b/pyslurm/core/node.pyx @@ -38,6 +38,7 @@ from pyslurm.utils.helpers import ( instance_to_dict, _sum_prop, nodelist_from_range_str, + nodelist_to_range_str, ) @@ -157,10 +158,37 @@ cdef class Nodes(dict): """Format the information as list of Node objects. Returns: - (list): List of Node objects + (list[pyslurm.Node]): List of Node objects """ return list(self.values()) + def modify(self, Node changes): + """Modify all Nodes in a collection. + + Args: + changes (pyslurm.Node): + Another Node object that contains all the changes to apply. + Check the `Other Parameters` of the Node class to see which + properties can be modified. + + Raises: + RPCError: When updating the Node was not successful. + + Examples: + >>> import pyslurm + >>> + >>> nodes = pyslurm.Nodes.load() + >>> # Prepare the changes + >>> changes = pyslurm.Node(state="DRAIN", reason="DRAIN Reason") + >>> # Apply the changes to all the nodes + >>> nodes.modify(changes) + """ + cdef Node n = changes + node_str = nodelist_to_range_str(list(self.keys())) + n._alloc_umsg() + cstr.fmalloc(&n.umsg.node_names, node_str) + verify_rpc(slurm_update_node(n.umsg)) + @property def free_memory(self): return _sum_prop(self, Node.free_memory) @@ -340,15 +368,16 @@ cdef class Node: return self - def modify(self, changes): + def modify(self, Node changes): """Modify a node. Implements the slurm_update_node RPC. Args: changes (pyslurm.Node): - Another Node object which contains all the changes that - should be applied to this instance. 
+ Another Node object that contains all the changes to apply. + Check the `Other Parameters` of the Node class to see which + properties can be modified. Raises: RPCError: When updating the Node was not successful. @@ -356,9 +385,10 @@ cdef class Node: Examples: >>> import pyslurm >>> - >>> mynode = pyslurm.Node("localhost") - >>> changes = pyslurm.Node(weight=100) - >>> # Setting the weight to 100 for the "localhost" node + >>> mynode = pyslurm.Node.load("localhost") + >>> # Prepare the changes + >>> changes = pyslurm.Node(state="DRAIN", reason="DRAIN Reason") + >>> # Modify it >>> mynode.modify(changes) """ cdef Node n = changes @@ -448,6 +478,10 @@ cdef class Node: def reason(self): return cstr.to_unicode(self.info.reason) + @reason.setter + def reason(self, val): + cstr.fmalloc2(&self.info.reason, &self.umsg.reason, val) + @property def reason_user(self): return uid_to_name(self.info.reason_uid, lookup=self.passwd) @@ -667,6 +701,10 @@ cdef class Node: xfree(state) return state_str + @state.setter + def state(self, val): + self.umsg.node_state=self.info.node_state = _node_state_from_str(val) + @property def next_state(self): if ((self.info.next_state != slurm.NO_VAL) @@ -677,10 +715,6 @@ cdef class Node: else: return None - @state.setter - def state(self, val): - self.umsg.node_state=self.info.node_state = _node_state_from_str(val) - @property def cpu_load(self): load = u32_parse(self.info.cpu_load) @@ -694,10 +728,36 @@ cdef class Node: def _node_state_from_str(state, err_on_invalid=True): if not state: return slurm.NO_VAL - - for i in range(slurm.NODE_STATE_END): - if state == slurm_node_state_string(i): - return i + ustate = state.upper() + + # Following states are explicitly possible as per documentation + # https://slurm.schedmd.com/scontrol.html#OPT_State_1 + if ustate == "CANCEL_REBOOT": + return slurm.NODE_STATE_CANCEL_REBOOT + elif ustate == "DOWN": + return slurm.NODE_STATE_DOWN + elif ustate == "DRAIN": + return slurm.NODE_STATE_DRAIN + elif ustate 
== "FAIL": + return slurm.NODE_STATE_FAIL + elif ustate == "FUTURE": + return slurm.NODE_STATE_FUTURE + elif ustate == "NORESP" or ustate == "NO_RESP": + return slurm.NODE_STATE_NO_RESPOND + elif ustate == "POWER_DOWN": + return slurm.NODE_STATE_POWER_DOWN + elif ustate == "POWER_DOWN_ASAP": + # Drain and mark for power down + return slurm.NODE_STATE_POWER_DOWN | slurm.NODE_STATE_POWER_DRAIN + elif ustate == "POWER_DOWN_FORCE": + # Kill all Jobs and power down + return slurm.NODE_STATE_POWER_DOWN | slurm.NODE_STATE_POWERED_DOWN + elif ustate == "POWER_UP": + return slurm.NODE_STATE_POWER_UP + elif ustate == "RESUME": + return slurm.NODE_RESUME + elif ustate == "UNDRAIN": + return slurm.NODE_STATE_UNDRAIN if err_on_invalid: raise ValueError(f"Invalid Node state: {state}") diff --git a/pyslurm/core/partition.pxd b/pyslurm/core/partition.pxd index 9baeba62..37d6a37c 100644 --- a/pyslurm/core/partition.pxd +++ b/pyslurm/core/partition.pxd @@ -101,7 +101,6 @@ cdef class Partition: * total_cpus * total_nodes * select_type_parameters - * consumable_resource Attributes: name (str): @@ -116,10 +115,8 @@ cdef class Partition: List of QoS which are allowed to execute Jobs alternate (str): Name of the alternate Partition in case a Partition is down. - consumable_resource (str): - The type of consumable resource used in the Partition. select_type_parameters (list[str]): - List of additional parameters passed to the select plugin used. + List of Select type parameters for the select plugin. cpu_binding (str): Default CPU-binding for Jobs that execute in a Partition. default_memory_per_cpu (int): diff --git a/pyslurm/core/partition.pyx b/pyslurm/core/partition.pyx index 25e17124..99aaf5e8 100644 --- a/pyslurm/core/partition.pyx +++ b/pyslurm/core/partition.pyx @@ -143,18 +143,29 @@ cdef class Partitions(dict): return self - def set_state(self, state): - """Modify the State of all Partitions in this Collection. 
+ def modify(self, changes): + """Modify all Partitions in a Collection. Args: - state (str): - Partition state to set + changes (pyslurm.Partition): + Another Partition object that contains all the changes to + apply. Check the `Other Parameters` of the Partition class to + see which properties can be modified. Raises: - RPCError: When updating the state failed + RPCError: When updating at least one Partition failed. + + Examples: + >>> import pyslurm + >>> + >>> parts = pyslurm.Partitions.load() + >>> # Prepare the changes + >>> changes = pyslurm.Partition(state="DRAIN") + >>> # Apply the changes to all the partitions + >>> parts.modify(changes) """ for part in self.values(): - part.modify(state=state) + part.modify(changes) def as_list(self): """Format the information as list of Partition objects. @@ -270,40 +281,30 @@ cdef class Partition: verify_rpc(slurm_create_partition(self.ptr)) return self - def modify(self, **changes): + def modify(self, Partition changes): """Modify a Partition. Implements the slurm_update_partition RPC. Args: - **changes (Any): - Changes for the Partition. Almost every Attribute from a - Partition can be modified, except for: - - * total_cpus - * total_nodes - * select_type_parameters - * consumable_resource + changes (pyslurm.Partition): + Another Partition object that contains all the changes to + apply. Check the `Other Parameters` of the Partition class to + see which properties can be modified. Raises: - ValueError: When no changes were specified or when a parsing error - occured. RPCError: When updating the Partition was not successful. 
Examples: >>> import pyslurm >>> - >>> # Modifying the maximum time limit - >>> mypart = pyslurm.Partition("normal") - >>> mypart.modify(max_time_limit="10-00:00:00") - >>> - >>> # Modifying the partition state - >>> mypart.modify(state="DRAIN") + >>> part = pyslurm.Partition.load("normal") + >>> # Prepare the changes + >>> changes = pyslurm.Partition(state="DRAIN") + >>> # Apply the changes to the "normal" Partition + >>> part.modify(changes) """ - if not changes: - raise ValueError("No changes were specified") - - cdef Partition part = Partition(**changes) + cdef Partition part = changes part.name = self._error_or_name() verify_rpc(slurm_update_partition(part.ptr)) @@ -381,10 +382,6 @@ cdef class Partition: def alternate(self, val): cstr.fmalloc(&self.ptr.alternate, val) - @property - def consumable_resource(self): - return _select_type_int_to_cons_res(self.ptr.cr_type) - @property def select_type_parameters(self): return _select_type_int_to_list(self.ptr.cr_type) @@ -757,7 +754,7 @@ def _split_oversubscribe_str(val): def _select_type_int_to_list(stype): # The rest of the CR_* stuff are just some extra parameters to the select # plugin - out = [] + out = _select_type_int_to_cons_res(stype) if stype & slurm.CR_OTHER_CONS_RES: out.append("OTHER_CONS_RES") @@ -800,7 +797,7 @@ def _select_type_int_to_cons_res(stype): elif stype & slurm.CR_MEMORY: return "MEMORY" else: - return None + return [] def _preempt_mode_str_to_int(mode): diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index 0e12ca37..27b2391a 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -282,7 +282,7 @@ cdef class Jobs(dict): return jobs @staticmethod - def modify(search_filter, db_connection=None, **changes): + def modify(search_filter, Job changes, db_connection=None): """Modify Slurm database Jobs. Implements the slurm_job_modify RPC. 
@@ -290,6 +290,11 @@ cdef class Jobs(dict): Args: search_filter (Union[pyslurm.db.JobSearchFilter, pyslurm.db.Jobs]): A filter to decide which Jobs should be modified. + changes (pyslurm.db.Job): + Another [pyslurm.db.Job][] object that contains all the + changes to apply. Check the `Other Parameters` of the + [pyslurm.db.Job][] class to see which properties can be + modified. db_connection (pyslurm.db.Connection): A Connection to the slurmdbd. By default, if no connection is supplied, one will automatically be created internally. This @@ -306,25 +311,21 @@ cdef class Jobs(dict): be committed or rolled back by using the respective methods on the connection object. This way, you have a chance to see which Jobs were modified before you commit the changes. - **changes (Any): - Check the `Other Parameters` Section of [pyslurm.db.Job][] to - see what attributes can be modified. Returns: (list[int]): A list of Jobs that were modified Raises: - ValueError: When a parsing error occured or the Database - connection is not open RPCError: When a failure modifying the Jobs occurred. Examples: In its simplest form, you can do something like this: >>> import pyslurm + >>> >>> search_filter = pyslurm.db.JobSearchFilter(ids=[9999]) - >>> modified_jobs = pyslurm.db.Jobs.modify( - ... search_filter, comment="A comment for the job") + >>> changes = pyslurm.db.Job(comment="A comment for the job") + >>> modified_jobs = pyslurm.db.Jobs.modify(search_filter, changes) >>> print(modified_jobs) >>> [9999] @@ -334,11 +335,13 @@ cdef class Jobs(dict): connection object: >>> import pyslurm - >>> search_filter = pyslurm.db.JobSearchFilter(ids=[9999]) + >>> >>> db_conn = pyslurm.db.Connection.open() + >>> search_filter = pyslurm.db.JobSearchFilter(ids=[9999]) + >>> changes = pyslurm.db.Job(comment="A comment for the job") >>> modified_jobs = pyslurm.db.Jobs.modify( - ... search_filter, db_conn, - ... comment="A comment for the job") + ... 
search_filter, changes, db_conn) + >>> >>> # Now you can first examine which Jobs have been modified >>> print(modified_jobs) >>> [9999] @@ -348,7 +351,7 @@ cdef class Jobs(dict): """ cdef: - Job job = Job(**changes) + Job job = changes JobSearchFilter jfilter Connection conn = db_connection SlurmList response @@ -499,23 +502,25 @@ cdef class Job: return out - def modify(self, db_connection=None, **changes): + def modify(self, changes, db_connection=None): """Modify a Slurm database Job. Args: + changes (pyslurm.db.Job): + Another [pyslurm.db.Job][] object that contains all the + changes to apply. Check the `Other Parameters` of the + [pyslurm.db.Job][] class to see which properties can be + modified. db_connection (pyslurm.db.Connection): A slurmdbd connection. See [pyslurm.db.Jobs.modify][pyslurm.db.job.Jobs.modify] for more - info - **changes (Any): - Check the `Other Parameters` Section of this class to see what - attributes can be modified. + info on this parameter. Raises: RPCError: When modifying the Job failed. 
""" cdef JobSearchFilter jfilter = JobSearchFilter(ids=[self.id]) - Jobs.modify(jfilter, db_connection, **changes) + Jobs.modify(jfilter, changes, db_connection) @property def account(self): diff --git a/tests/integration/test_db_job.py b/tests/integration/test_db_job.py index c344d192..36005935 100644 --- a/tests/integration/test_db_job.py +++ b/tests/integration/test_db_job.py @@ -60,7 +60,8 @@ def test_modify(submit_job): util.wait(5) jfilter = pyslurm.db.JobSearchFilter(ids=[job.id]) - pyslurm.db.Jobs.modify(jfilter, comment="test comment") + changes = pyslurm.db.Job(comment="test comment") + pyslurm.db.Jobs.modify(jfilter, changes) job = pyslurm.db.Job.load(job.id) assert job.comment == "test comment" @@ -72,7 +73,8 @@ def test_modify_with_existing_conn(submit_job): conn = pyslurm.db.Connection.open() jfilter = pyslurm.db.JobSearchFilter(ids=[job.id]) - pyslurm.db.Jobs.modify(jfilter, conn, comment="test comment") + changes = pyslurm.db.Job(comment="test comment") + pyslurm.db.Jobs.modify(jfilter, changes, conn) job = pyslurm.db.Job.load(job.id) assert job.comment != "test comment" diff --git a/tests/integration/test_partition.py b/tests/integration/test_partition.py index bc5a28e2..fcfcf4af 100644 --- a/tests/integration/test_partition.py +++ b/tests/integration/test_partition.py @@ -51,19 +51,19 @@ def test_create_delete(): def test_modify(): part = Partitions.load().as_list()[0] - part.modify(default_time=120) + part.modify(Partition(default_time=120)) assert Partition.load(part.name).default_time == 120 - part.modify(default_time="1-00:00:00") + part.modify(Partition(default_time="1-00:00:00")) assert Partition.load(part.name).default_time == 24*60 - part.modify(default_time="UNLIMITED") + part.modify(Partition(default_time="UNLIMITED")) assert Partition.load(part.name).default_time == "UNLIMITED" - part.modify(state="DRAIN") + part.modify(Partition(state="DRAIN")) assert Partition.load(part.name).state == "DRAIN" - part.modify(state="UP") + 
part.modify(Partition(state="UP")) assert Partition.load(part.name).state == "UP" diff --git a/tests/unit/test_node.py b/tests/unit/test_node.py index 2caf8d37..f2b5594a 100644 --- a/tests/unit/test_node.py +++ b/tests/unit/test_node.py @@ -23,6 +23,7 @@ import pytest import pyslurm from pyslurm import Node, Nodes +from pyslurm.core.node import _node_state_from_str def test_create_instance(): @@ -35,8 +36,37 @@ def test_parse_all(): def test_create_nodes_collection(): - # TODO - assert True + nodes = Nodes("node1,node2") + assert len(nodes) == 2 + assert "node1" in nodes + assert "node2" in nodes + assert nodes["node1"].name == "node1" + assert nodes["node2"].name == "node2" + + nodes = Nodes(["node1", "node2"]) + assert len(nodes) == 2 + assert "node1" in nodes + assert "node2" in nodes + assert nodes["node1"].name == "node1" + assert nodes["node2"].name == "node2" + + nodes = Nodes( + { + "node1": Node("node1"), + "node2": Node("node2"), + } + ) + assert len(nodes) == 2 + assert "node1" in nodes + assert "node2" in nodes + assert nodes["node1"].name == "node1" + assert nodes["node2"].name == "node2" + + +def test_set_node_state(): + assert _node_state_from_str("RESUME") + assert _node_state_from_str("undrain") + assert _node_state_from_str("POWER_DOWN") def test_setting_attributes(): From 3a603e1876d7bf8da2f545a926eca6d0521da314 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Mon, 5 Jun 2023 21:38:26 +0200 Subject: [PATCH 27/48] Fix parsing user names in JobSearchFilter (#303) Fixes #302 - also makes it clearer in the docs for some attributes which type is expected inside the lists. 
--- pyslurm/db/job.pxd | 50 +++++++++++++++++++-------------------- pyslurm/db/job.pyx | 20 ++-------------- pyslurm/utils/helpers.pyx | 8 +++---- tests/unit/test_common.py | 10 ++++---- tests/unit/test_db_job.py | 2 +- 5 files changed, 38 insertions(+), 52 deletions(-) diff --git a/pyslurm/db/job.pxd b/pyslurm/db/job.pxd index e333c062..0faac2fd 100644 --- a/pyslurm/db/job.pxd +++ b/pyslurm/db/job.pxd @@ -56,72 +56,72 @@ from pyslurm.db.tres cimport TrackableResources, TrackableResource cdef class JobSearchFilter: - """Search conditions for Slurm database Jobs. + """Query-Conditions for Jobs in the Slurm Database. Args: **kwargs (Any, optional=None): Any valid attribute of the object. Attributes: - ids (list): + ids (list[int]): A list of Job ids to search for. start_time (Union[str, int, datetime.datetime]): Search for Jobs which started after this time. end_time (Union[str, int, datetime.datetime]): Search for Jobs which ended before this time. - accounts (list): + accounts (list[str]): Search for Jobs with these account names. - association_ids (list): + association_ids (list[int]): Search for Jobs with these association ids. - clusters (list): + clusters (list[str]): Search for Jobs running in these clusters. - constraints (list): + constraints (list[str]): Search for Jobs with these constraints. cpus (int): Search for Jobs with exactly this many CPUs. - Note: If you also specify max_cpus, then this value will act as + Note: If you also specify `max_cpus`, then this value will act as the minimum. max_cpus (int): Search for Jobs with no more than this amount of CPUs. - Note: This value has no effect without also setting cpus. + Note: This value has no effect without also setting `cpus`. nodes (int): Search for Jobs with exactly this many nodes. - Note: If you also specify max_nodes, then this value will act as + Note: If you also specify `max_nodes`, then this value will act as the minimum. 
max_nodes (int): Search for Jobs with no more than this amount of nodes. - Note: This value has no effect without also setting nodes. - qos (list): + Note: This value has no effect without also setting `nodes`. + qos (list[str]): Search for Jobs with these Qualities of Service. - names (list): + names (list[str]): Search for Jobs with these job names. - partitions (list): + partitions (list[str]): Search for Jobs with these partition names. - groups (list): - Search for Jobs with these group names. You can both specify the - groups as string or by their GID. + groups (list[str]): + Search for Jobs with these group names. Alternatively, you can + also specify the GIDs directly. timelimit (Union[str, int]): Search for Jobs with exactly this timelimit. - Note: If you also specify max_timelimit, then this value will act + Note: If you also specify `max_timelimit`, then this value will act as the minimum. max_timelimit (Union[str, int]): Search for Jobs which run no longer than this timelimit - Note: This value has no effect without also setting timelimit - users (list): - Search for Jobs with these user names. You can both specify the - users as string or by their UID. - wckeys (list): + Note: This value has no effect without also setting `timelimit` + users (list[str]): + Search for Jobs with these user names. Alternatively, you can also + specify the UIDs directly. 
+ wckeys (list[str]): Search for Jobs with these WCKeys - nodelist (list): + nodelist (list[str]): Search for Jobs that ran on any of these Nodes with_script (bool): Instruct the slurmdbd to also send the job script(s) Note: This requires specifying explictiy job ids, and is mutually - exclusive with with_env + exclusive with `with_env` with_env (bool): Instruct the slurmdbd to also send the job environment(s) Note: This requires specifying explictiy job ids, and is mutually - exclusive with with_script + exclusive with `with_script` """ cdef slurmdb_job_cond_t *ptr diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index 27b2391a..af86f704 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -87,28 +87,12 @@ cdef class JobSearchFilter: def _parse_groups(self): if not self.groups: return None - - gid_list = [] - for group in self.groups: - if isinstance(group, int): - gid_list.append(group) - else: - gid_list.append(group_to_gid(group)) - - return gid_list + return list({group_to_gid(group) for group in self.groups}) def _parse_users(self): if not self.users: return None - - uid_list = [] - for user in self.users: - if not isinstance(user, list): - uid_list.append(int(user)) - elif user: - uid_list.append(user_to_uid(user)) - - return uid_list + return list({user_to_uid(user) for user in self.users}) def _parse_clusters(self): if not self.clusters: diff --git a/pyslurm/utils/helpers.pyx b/pyslurm/utils/helpers.pyx index cbb0ad5d..fcfe9965 100644 --- a/pyslurm/utils/helpers.pyx +++ b/pyslurm/utils/helpers.pyx @@ -95,10 +95,10 @@ def user_to_uid(user, err_on_invalid=True): return slurm.NO_VAL try: - if isinstance(user, str): + if isinstance(user, str) and not user.isdigit(): return getpwnam(user).pw_uid - return getpwuid(user).pw_uid + return getpwuid(int(user)).pw_uid except KeyError as e: if err_on_invalid: raise e @@ -112,10 +112,10 @@ def group_to_gid(group, err_on_invalid=True): return slurm.NO_VAL try: - if isinstance(group, str): + if 
isinstance(group, str) and not group.isdigit(): return getgrnam(group).gr_gid - return getgrgid(group).gr_gid + return getgrgid(int(group)).gr_gid except KeyError as e: if err_on_invalid: raise e diff --git a/tests/unit/test_common.py b/tests/unit/test_common.py index 47832436..48f4fecf 100644 --- a/tests/unit/test_common.py +++ b/tests/unit/test_common.py @@ -309,8 +309,9 @@ def test_parse_uid(self): name = uid_to_name(0, lookup=lookup) assert name == "root" - uid = user_to_uid("root") - assert uid == 0 + assert user_to_uid("root") == 0 + assert user_to_uid(0) == 0 + assert user_to_uid("0") == 0 with pytest.raises(KeyError): name = uid_to_name(2**32-5) @@ -326,8 +327,9 @@ def test_parse_gid(self): name = gid_to_name(0, lookup=lookup) assert name == "root" - gid = group_to_gid("root") - assert gid == 0 + assert group_to_gid("root") == 0 + assert group_to_gid(0) == 0 + assert group_to_gid("0") == 0 with pytest.raises(KeyError): name = gid_to_name(2**32-5) diff --git a/tests/unit/test_db_job.py b/tests/unit/test_db_job.py index c157a650..9391f04a 100644 --- a/tests/unit/test_db_job.py +++ b/tests/unit/test_db_job.py @@ -24,7 +24,7 @@ import pyslurm -def test_search_filter(): +def test_filter(): job_filter = pyslurm.db.JobSearchFilter() job_filter.clusters = ["test1"] From 485722ba7304418decf7da207ca80e22ee730ff7 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Sun, 25 Jun 2023 19:20:52 +0200 Subject: [PATCH 28/48] Implement DB Associations API and a little refactoring. 
(#304) - All collections inherit from `list` instead of `dict` now (this change is mostly motivated by multi-cluster support that will probably be added in the future) - Added work-in-progress API for database `Associations` - renamed `JobSearchFilter` to `JobFilter` --- CHANGELOG.md | 10 +- docs/reference/db/jobfilter.md | 6 + docs/reference/db/jobsearchfilter.md | 6 - docs/reference/index.md | 2 +- pyslurm/__init__.py | 10 +- pyslurm/core/job/job.pxd | 2 +- pyslurm/core/job/job.pyx | 92 ++++-- pyslurm/core/job/step.pxd | 7 +- pyslurm/core/job/step.pyx | 121 ++++--- pyslurm/core/node.pxd | 4 +- pyslurm/core/node.pyx | 73 +++-- pyslurm/core/partition.pxd | 4 +- pyslurm/core/partition.pyx | 73 +++-- pyslurm/db/__init__.py | 9 +- pyslurm/db/assoc.pxd | 87 +++++ pyslurm/db/assoc.pyx | 455 +++++++++++++++++++++++++++ pyslurm/db/cluster.pxd | 27 ++ pyslurm/db/cluster.pyx | 31 ++ pyslurm/db/connection.pyx | 10 + pyslurm/db/job.pxd | 10 +- pyslurm/db/job.pyx | 196 +++++++----- pyslurm/db/qos.pxd | 11 +- pyslurm/db/qos.pyx | 97 ++++-- pyslurm/db/step.pyx | 2 +- pyslurm/db/tres.pxd | 43 ++- pyslurm/db/tres.pyx | 264 +++++++++++++++- pyslurm/db/util.pxd | 1 + pyslurm/db/util.pyx | 9 + pyslurm/slurm/extra.pxi | 5 + pyslurm/utils/cstr.pyx | 2 +- pyslurm/utils/helpers.pyx | 70 +++++ tests/integration/test_db_job.py | 6 +- tests/integration/test_db_qos.py | 2 +- tests/integration/test_job.py | 2 +- tests/integration/test_job_steps.py | 8 +- tests/integration/test_node.py | 6 +- tests/integration/test_partition.py | 13 +- tests/unit/test_common.py | 61 +++- tests/unit/test_db_job.py | 5 +- tests/unit/test_db_qos.py | 2 +- tests/unit/test_job_steps.py | 2 +- tests/unit/test_node.py | 6 +- tests/unit/test_partition.py | 6 +- 43 files changed, 1541 insertions(+), 317 deletions(-) create mode 100644 docs/reference/db/jobfilter.md delete mode 100644 docs/reference/db/jobsearchfilter.md create mode 100644 pyslurm/db/assoc.pxd create mode 100644 pyslurm/db/assoc.pyx create mode 100644 
pyslurm/db/cluster.pxd create mode 100644 pyslurm/db/cluster.pyx diff --git a/CHANGELOG.md b/CHANGELOG.md index d7f10341..df972286 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - failed_node - Now possible to initialize a pyslurm.db.Jobs collection with existing job ids or pyslurm.db.Job objects +- Added `as_dict` function to all Collections ### Fixed @@ -26,6 +27,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - no start/end time was specified - the Job was older than a day +### Changed + +- All Collections (like [pyslurm.Jobs](https://pyslurm.github.io/23.2/reference/job/#pyslurm.Jobs)) inherit from `list` now instead of `dict` +- `JobSearchFilter` has been renamed to `JobFilter` + ## [23.2.1](https://github.com/PySlurm/pyslurm/releases/tag/v23.2.1) - 2023-05-18 ### Added @@ -40,7 +46,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - [pyslurm.db.Job](https://pyslurm.github.io/23.2/reference/db/job/#pyslurm.db.Job) - [pyslurm.db.Jobs](https://pyslurm.github.io/23.2/reference/db/job/#pyslurm.db.Jobs) - [pyslurm.db.JobStep](https://pyslurm.github.io/23.2/reference/db/jobstep/#pyslurm.db.JobStep) - - [pyslurm.db.JobSearchFilter](https://pyslurm.github.io/23.2/reference/db/jobsearchfilter/#pyslurm.db.JobSearchFilter) + - [pyslurm.db.JobFilter](https://pyslurm.github.io/23.2/reference/db/jobsearchfilter/#pyslurm.db.JobFilter) - Classes to interact with the Node API - [pyslurm.Node](https://pyslurm.github.io/23.2/reference/node/#pyslurm.Node) - [pyslurm.Nodes](https://pyslurm.github.io/23.2/reference/node/#pyslurm.Nodes) @@ -49,7 +55,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - [pyslurm.RPCError](https://pyslurm.github.io/23.2/reference/exceptions/#pyslurm.RPCError) - [Utility Functions](https://pyslurm.github.io/23.2/reference/utilities/#pyslurm.utils) 
-### Changes +### Changed - Completely overhaul the documentation, switch to mkdocs - Rework the tests: Split them into unit and integration tests diff --git a/docs/reference/db/jobfilter.md b/docs/reference/db/jobfilter.md new file mode 100644 index 00000000..21aa55d1 --- /dev/null +++ b/docs/reference/db/jobfilter.md @@ -0,0 +1,6 @@ +--- +title: JobFilter +--- + +::: pyslurm.db.JobFilter + handler: python diff --git a/docs/reference/db/jobsearchfilter.md b/docs/reference/db/jobsearchfilter.md deleted file mode 100644 index fa3864c5..00000000 --- a/docs/reference/db/jobsearchfilter.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: JobSearchFilter ---- - -::: pyslurm.db.JobSearchFilter - handler: python diff --git a/docs/reference/index.md b/docs/reference/index.md index 5f66d339..35a6c678 100644 --- a/docs/reference/index.md +++ b/docs/reference/index.md @@ -37,7 +37,7 @@ The `pyslurm` package is a wrapper around the Slurm C-API * [pyslurm.db.Job][] * [pyslurm.db.JobStep][] * [pyslurm.db.Jobs][] - * [pyslurm.db.JobSearchFilter][] + * [pyslurm.db.JobFilter][] * Node API * [pyslurm.Node][] * [pyslurm.Nodes][] diff --git a/pyslurm/__init__.py b/pyslurm/__init__.py index 06bd804b..4d3a5101 100644 --- a/pyslurm/__init__.py +++ b/pyslurm/__init__.py @@ -9,11 +9,15 @@ sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL) +# Initialize slurm api +from pyslurm.api import slurm_init, slurm_fini +slurm_init() + from .pyslurm import * from .__version__ import __version__ -from pyslurm import utils from pyslurm import db +from pyslurm import utils from pyslurm import constants from pyslurm.core.job import ( @@ -32,10 +36,6 @@ ) from pyslurm.core import slurmctld -# Initialize slurm api -from pyslurm.api import slurm_init, slurm_fini -slurm_init() - def version(): return __version__ diff --git a/pyslurm/core/job/job.pxd b/pyslurm/core/job/job.pxd index d1c8ddf8..bee4f9ec 100644 --- a/pyslurm/core/job/job.pxd +++ b/pyslurm/core/job/job.pxd @@ -67,7 +67,7 @@ from 
pyslurm.slurm cimport ( ) -cdef class Jobs(dict): +cdef class Jobs(list): """A collection of [pyslurm.Job][] objects. Args: diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index 521a42a9..2c33d581 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -34,6 +34,7 @@ from typing import Union from pyslurm.utils import cstr, ctime from pyslurm.utils.uint import * from pyslurm.core.job.util import * +from pyslurm.db.cluster import LOCAL_CLUSTER from pyslurm.core.error import ( RPCError, verify_rpc, @@ -47,12 +48,14 @@ from pyslurm.utils.helpers import ( _getgrall_to_dict, _getpwall_to_dict, instance_to_dict, + collection_to_dict, + group_collection_by_cluster, _sum_prop, _get_exit_code, ) -cdef class Jobs(dict): +cdef class Jobs(list): def __cinit__(self): self.info = NULL @@ -63,14 +66,37 @@ cdef class Jobs(dict): def __init__(self, jobs=None, frozen=False): self.frozen = frozen - if isinstance(jobs, dict): - self.update(jobs) - elif jobs is not None: + if isinstance(jobs, list): for job in jobs: if isinstance(job, int): - self[job] = Job(job) + self.append(Job(job)) else: - self[job.id] = job + self.append(job) + elif isinstance(jobs, str): + joblist = jobs.split(",") + self.extend([Job(int(job)) for job in joblist]) + elif isinstance(jobs, dict): + self.extend([job for job in jobs.values()]) + elif jobs is not None: + raise TypeError("Invalid Type: {type(jobs)}") + + def as_dict(self, recursive=False): + """Convert the collection data to a dict. + + Args: + recursive (bool, optional): + By default, the objects will not be converted to a dict. If + this is set to `True`, then additionally all objects are + converted to dicts. + + Returns: + (dict): Collection as a dict. 
+ """ + col = collection_to_dict(self, identifier=Job.id, recursive=recursive) + return col.get(LOCAL_CLUSTER, {}) + + def group_by_cluster(self): + return group_collection_by_cluster(self) @staticmethod def load(preload_passwd_info=False, frozen=False): @@ -124,7 +150,7 @@ cdef class Jobs(dict): job.passwd = passwd job.groups = groups - jobs[job.id] = job + jobs.append(job) # At this point we memcpy'd all the memory for the Jobs. Setting this # to 0 will prevent the slurm job free function to deallocate the @@ -143,28 +169,34 @@ cdef class Jobs(dict): Raises: RPCError: When getting the Jobs from the slurmctld failed. """ - cdef Jobs reloaded_jobs = Jobs.load() + cdef: + Jobs reloaded_jobs + Jobs new_jobs = Jobs() + dict self_dict - for jid in list(self.keys()): + if not self: + return self + + reloaded_jobs = Jobs.load().as_dict() + for idx, jid in enumerate(self): if jid in reloaded_jobs: # Put the new data in. - self[jid] = reloaded_jobs[jid] - elif not self.frozen: - # Remove this instance from the current collection, as the Job - # doesn't exist anymore. - del self[jid] + new_jobs.append(reloaded_jobs[jid]) if not self.frozen: + self_dict = self.as_dict() for jid in reloaded_jobs: - if jid not in self: - self[jid] = reloaded_jobs[jid] + if jid not in self_dict: + new_jobs.append(reloaded_jobs[jid]) + self.clear() + self.extend(new_jobs) return self def load_steps(self): """Load all Job steps for this collection of Jobs. - This function fills in the "steps" attribute for all Jobs in the + This function fills in the `steps` attribute for all Jobs in the collection. !!! note @@ -175,21 +207,16 @@ cdef class Jobs(dict): RPCError: When retrieving the Job information for all the Steps failed. """ - cdef dict step_info = JobSteps.load_all() + cdef dict steps = JobSteps.load().as_dict() - for jid in self: + for idx, job in enumerate(self): # Ignore any Steps from Jobs which do not exist in this # collection. 
- if jid in step_info: - self[jid].steps = step_info[jid] - - def as_list(self): - """Format the information as list of Job objects. - - Returns: - (list[pyslurm.Job]): List of Job objects - """ - return list(self.values()) + jid = job.id + if jid in steps: + job_steps = self[idx].steps + job_steps.clear() + job_steps.extend(steps[jid].values()) @property def memory(self): @@ -218,6 +245,7 @@ cdef class Job: self.ptr.job_id = job_id self.passwd = {} self.groups = {} + cstr.fmalloc(&self.ptr.cluster, LOCAL_CLUSTER) self.steps = JobSteps.__new__(JobSteps) def _alloc_impl(self): @@ -234,7 +262,9 @@ cdef class Job: self._dealloc_impl() def __eq__(self, other): - return isinstance(other, Job) and self.id == other.id + if isinstance(other, Job): + return self.id == other.id and self.cluster == other.cluster + return NotImplemented @staticmethod def load(job_id): @@ -278,7 +308,7 @@ cdef class Job: if not slurm.IS_JOB_PENDING(wrap.ptr): # Just ignore if the steps couldn't be loaded here. try: - wrap.steps = JobSteps._load(wrap) + wrap.steps = JobSteps._load_single(wrap) except RPCError: pass else: diff --git a/pyslurm/core/job/step.pxd b/pyslurm/core/job/step.pxd index 087742d6..458ee506 100644 --- a/pyslurm/core/job/step.pxd +++ b/pyslurm/core/job/step.pxd @@ -49,7 +49,7 @@ from pyslurm.utils.ctime cimport time_t from pyslurm.core.job.task_dist cimport TaskDistribution -cdef class JobSteps(dict): +cdef class JobSteps(list): """A collection of [pyslurm.JobStep][] objects for a given Job. 
Args: @@ -64,11 +64,12 @@ cdef class JobSteps(dict): cdef: job_step_info_response_msg_t *info job_step_info_t tmp_info + _job_id @staticmethod - cdef JobSteps _load(Job job) + cdef JobSteps _load_single(Job job) - cdef dict _get_info(self, uint32_t job_id, int flags) + cdef _load_data(self, uint32_t job_id, int flags) cdef class JobStep: diff --git a/pyslurm/core/job/step.pyx b/pyslurm/core/job/step.pyx index f6b60d9c..d4038f54 100644 --- a/pyslurm/core/job/step.pyx +++ b/pyslurm/core/job/step.pyx @@ -26,10 +26,15 @@ from typing import Union from pyslurm.utils import cstr, ctime from pyslurm.utils.uint import * from pyslurm.core.error import RPCError, verify_rpc +from pyslurm.db.cluster import LOCAL_CLUSTER from pyslurm.utils.helpers import ( signal_to_num, instance_to_dict, uid_to_name, + collection_to_dict, + group_collection_by_cluster, + humanize_step_id, + dehumanize_step_id, ) from pyslurm.core.job.util import cpu_freq_int_to_str from pyslurm.utils.ctime import ( @@ -41,7 +46,7 @@ from pyslurm.utils.ctime import ( ) -cdef class JobSteps(dict): +cdef class JobSteps(list): def __dealloc__(self): slurm_free_job_step_info_response_msg(self.info) @@ -49,44 +54,74 @@ cdef class JobSteps(dict): def __cinit__(self): self.info = NULL - def __init__(self): - pass + def __init__(self, steps=None): + if isinstance(steps, list): + self.extend(steps) + elif steps is not None: + raise TypeError("Invalid Type: {type(steps)}") + + def as_dict(self, recursive=False): + """Convert the collection data to a dict. + + Args: + recursive (bool, optional): + By default, the objects will not be converted to a dict. If + this is set to `True`, then additionally all objects are + converted to dicts. + + Returns: + (dict): Collection as a dict. 
+ """ + col = collection_to_dict(self, identifier=JobStep.id, + recursive=recursive, group_id=JobStep.job_id) + col = col.get(LOCAL_CLUSTER, {}) + if self._job_id: + return col.get(self._job_id, {}) + + return col + + def group_by_cluster(self): + return group_collection_by_cluster(self) @staticmethod - def load(job): - """Load the Steps for a specific Job + def load(job_id=0): + """Load the Job Steps from the system. Args: - job (Union[Job, int]): - The Job for which the Steps should be loaded + job_id (Union[Job, int]): + The Job for which the Steps should be loaded. Returns: (pyslurm.JobSteps): JobSteps of the Job """ - cdef Job _job - _job = Job.load(job.id) if isinstance(job, Job) else Job.load(job) - return JobSteps._load(_job) + cdef: + Job job + JobSteps steps + + if job_id: + job = Job.load(job_id.id if isinstance(job_id, Job) else job_id) + steps = JobSteps._load_single(job) + steps._job_id = job.id + return steps + else: + steps = JobSteps() + return steps._load_data(0, slurm.SHOW_ALL) @staticmethod - cdef JobSteps _load(Job job): - cdef JobSteps steps = JobSteps.__new__(JobSteps) + cdef JobSteps _load_single(Job job): + cdef JobSteps steps = JobSteps() - step_info = steps._get_info(job.id, slurm.SHOW_ALL) - if not step_info and not slurm.IS_JOB_PENDING(job.ptr): + steps._load_data(job.id, slurm.SHOW_ALL) + if not steps and not slurm.IS_JOB_PENDING(job.ptr): msg = f"Failed to load step info for Job {job.id}." raise RPCError(msg=msg) - # No super().__init__() needed? Cython probably already initialized - # the dict automatically. 
- steps.update(step_info[job.id]) return steps - cdef dict _get_info(self, uint32_t job_id, int flags): + cdef _load_data(self, uint32_t job_id, int flags): cdef: JobStep step - JobSteps steps uint32_t cnt = 0 - dict out = {} rc = slurm_get_job_steps(0, job_id, slurm.NO_VAL, &self.info, flags) @@ -102,12 +137,7 @@ cdef class JobSteps(dict): # Prevent double free if xmalloc fails mid-loop and a MemoryError # is raised by replacing it with a zeroed-out job_step_info_t. self.info.job_steps[cnt] = self.tmp_info - - if not step.job_id in out: - steps = JobSteps.__new__(JobSteps) - out[step.job_id] = steps - - out[step.job_id].update({step.id: step}) + self.append(step) # At this point we memcpy'd all the memory for the Steps. Setting this # to 0 will prevent the slurm step free function to deallocate the @@ -117,18 +147,7 @@ cdef class JobSteps(dict): # instance. self.info.job_step_count = 0 - return out - - @staticmethod - def load_all(): - """Loads all the steps in the system. - - Returns: - (dict): A dict where every JobID (key) is mapped with an instance - of its JobSteps (value). 
- """ - cdef JobSteps steps = JobSteps.__new__(JobSteps) - return steps._get_info(slurm.NO_VAL, slurm.SHOW_ALL) + return self cdef class JobStep: @@ -425,29 +444,3 @@ cdef class JobStep: @property def slurm_protocol_version(self): return u32_parse(self.ptr.start_protocol_ver) - - -def humanize_step_id(sid): - if sid == slurm.SLURM_BATCH_SCRIPT: - return "batch" - elif sid == slurm.SLURM_EXTERN_CONT: - return "extern" - elif sid == slurm.SLURM_INTERACTIVE_STEP: - return "interactive" - elif sid == slurm.SLURM_PENDING_STEP: - return "pending" - else: - return sid - - -def dehumanize_step_id(sid): - if sid == "batch": - return slurm.SLURM_BATCH_SCRIPT - elif sid == "extern": - return slurm.SLURM_EXTERN_CONT - elif sid == "interactive": - return slurm.SLURM_INTERACTIVE_STEP - elif sid == "pending": - return slurm.SLURM_PENDING_STEP - else: - return int(sid) diff --git a/pyslurm/core/node.pxd b/pyslurm/core/node.pxd index 19684612..ea59e6ff 100644 --- a/pyslurm/core/node.pxd +++ b/pyslurm/core/node.pxd @@ -57,7 +57,7 @@ from pyslurm.utils.ctime cimport time_t from pyslurm.utils.uint cimport * -cdef class Nodes(dict): +cdef class Nodes(list): """A collection of [pyslurm.Node][] objects. 
Args: @@ -233,6 +233,8 @@ cdef class Node: dict passwd dict groups + cdef readonly cluster + @staticmethod cdef _swap_data(Node dst, Node src) diff --git a/pyslurm/core/node.pyx b/pyslurm/core/node.pyx index 9c1ecf30..609016fe 100644 --- a/pyslurm/core/node.pyx +++ b/pyslurm/core/node.pyx @@ -28,6 +28,7 @@ from pyslurm.utils import ctime from pyslurm.utils.uint import * from pyslurm.core.error import RPCError, verify_rpc from pyslurm.utils.ctime import timestamp_to_date, _raw_time +from pyslurm.db.cluster import LOCAL_CLUSTER from pyslurm.utils.helpers import ( uid_to_name, gid_to_name, @@ -36,13 +37,15 @@ from pyslurm.utils.helpers import ( _getpwall_to_dict, cpubind_to_num, instance_to_dict, + collection_to_dict, + group_collection_by_cluster, _sum_prop, nodelist_from_range_str, nodelist_to_range_str, ) -cdef class Nodes(dict): +cdef class Nodes(list): def __dealloc__(self): slurm_free_node_info_msg(self.info) @@ -53,17 +56,38 @@ cdef class Nodes(dict): self.part_info = NULL def __init__(self, nodes=None): - if isinstance(nodes, dict): - self.update(nodes) - elif isinstance(nodes, str): - nodelist = nodelist_from_range_str(nodes) - self.update({node: Node(node) for node in nodelist}) - elif nodes is not None: + if isinstance(nodes, list): for node in nodes: if isinstance(node, str): - self[node] = Node(node) + self.append(Node(node)) else: - self[node.name] = node + self.append(node) + elif isinstance(nodes, str): + nodelist = nodes.split(",") + self.extend([Node(node) for node in nodelist]) + elif isinstance(nodes, dict): + self.extend([node for node in nodes.values()]) + elif nodes is not None: + raise TypeError("Invalid Type: {type(nodes)}") + + def as_dict(self, recursive=False): + """Convert the collection data to a dict. + + Args: + recursive (bool, optional): + By default, the objects will not be converted to a dict. If + this is set to `True`, then additionally all objects are + converted to dicts. + + Returns: + (dict): Collection as a dict. 
+ """ + col = collection_to_dict(self, identifier=Node.name, + recursive=recursive) + return col.get(LOCAL_CLUSTER, {}) + + def group_by_cluster(self): + return group_collection_by_cluster(self) @staticmethod def load(preload_passwd_info=False): @@ -117,7 +141,7 @@ cdef class Nodes(dict): node.passwd = passwd node.groups = groups - nodes[node.name] = node + nodes.append(node) # At this point we memcpy'd all the memory for the Nodes. Setting this # to 0 will prevent the slurm node free function to deallocate the @@ -141,27 +165,19 @@ cdef class Nodes(dict): RPCError: When getting the Nodes from the slurmctld failed. """ cdef Nodes reloaded_nodes - our_nodes = list(self.keys()) - if not our_nodes: - return None + if not self: + return self - reloaded_nodes = Nodes.load() - for node in list(self.keys()): + reloaded_nodes = Nodes.load().as_dict() + for idx, node in enumerate(self): + node_name = node.name if node in reloaded_nodes: # Put the new data in. - self[node] = reloaded_nodes[node] + self[idx] = reloaded_nodes[node_name] return self - def as_list(self): - """Format the information as list of Node objects. - - Returns: - (list[pyslurm.Node]): List of Node objects - """ - return list(self.values()) - def modify(self, Node changes): """Modify all Nodes in a collection. 
@@ -183,8 +199,11 @@ cdef class Nodes(dict): >>> # Apply the changes to all the nodes >>> nodes.modify(changes) """ - cdef Node n = changes - node_str = nodelist_to_range_str(list(self.keys())) + cdef: + Node n = changes + list node_names = [node.name for node in self] + + node_str = nodelist_to_range_str(node_names) n._alloc_umsg() cstr.fmalloc(&n.umsg.node_names, node_str) verify_rpc(slurm_update_node(n.umsg)) @@ -235,6 +254,7 @@ cdef class Node: def __init__(self, name=None, **kwargs): self._alloc_impl() self.name = name + self.cluster = LOCAL_CLUSTER for k, v in kwargs.items(): setattr(self, k, v) @@ -282,6 +302,7 @@ cdef class Node: wrap._alloc_info() wrap.passwd = {} wrap.groups = {} + wrap.cluster = LOCAL_CLUSTER memcpy(wrap.info, in_ptr, sizeof(node_info_t)) return wrap diff --git a/pyslurm/core/partition.pxd b/pyslurm/core/partition.pxd index 37d6a37c..b10366b8 100644 --- a/pyslurm/core/partition.pxd +++ b/pyslurm/core/partition.pxd @@ -58,7 +58,7 @@ from pyslurm.utils.uint cimport * from pyslurm.core cimport slurmctld -cdef class Partitions(dict): +cdef class Partitions(list): """A collection of [pyslurm.Partition][] objects. 
Args: @@ -216,5 +216,7 @@ cdef class Partition: int power_save_enabled slurmctld.Config slurm_conf + cdef readonly cluster + @staticmethod cdef Partition from_ptr(partition_info_t *in_ptr) diff --git a/pyslurm/core/partition.pyx b/pyslurm/core/partition.pyx index 99aaf5e8..56375d33 100644 --- a/pyslurm/core/partition.pyx +++ b/pyslurm/core/partition.pyx @@ -30,6 +30,7 @@ from pyslurm.utils.uint import * from pyslurm.core.error import RPCError, verify_rpc from pyslurm.utils.ctime import timestamp_to_date, _raw_time from pyslurm.constants import UNLIMITED +from pyslurm.db.cluster import LOCAL_CLUSTER from pyslurm.utils.helpers import ( uid_to_name, gid_to_name, @@ -37,6 +38,8 @@ from pyslurm.utils.helpers import ( _getpwall_to_dict, cpubind_to_num, instance_to_dict, + collection_to_dict, + group_collection_by_cluster, _sum_prop, dehumanize, ) @@ -46,7 +49,8 @@ from pyslurm.utils.ctime import ( ) -cdef class Partitions(dict): +cdef class Partitions(list): + def __dealloc__(self): slurm_free_partition_info_msg(self.info) @@ -54,17 +58,38 @@ cdef class Partitions(dict): self.info = NULL def __init__(self, partitions=None): - if isinstance(partitions, dict): - self.update(partitions) - elif isinstance(partitions, str): - partlist = partitions.split(",") - self.update({part: Partition(part) for part in partlist}) - elif partitions is not None: + if isinstance(partitions, list): for part in partitions: if isinstance(part, str): - self[part] = Partition(part) + self.append(Partition(part)) else: - self[part.name] = part + self.append(part) + elif isinstance(partitions, str): + partlist = partitions.split(",") + self.extend([Partition(part) for part in partlist]) + elif isinstance(partitions, dict): + self.extend([part for part in partitions.values()]) + elif partitions is not None: + raise TypeError("Invalid Type: {type(partitions)}") + + def as_dict(self, recursive=False): + """Convert the collection data to a dict. 
+ + Args: + recursive (bool, optional): + By default, the objects will not be converted to a dict. If + this is set to `True`, then additionally all objects are + converted to dicts. + + Returns: + (dict): Collection as a dict. + """ + col = collection_to_dict(self, identifier=Partition.name, + recursive=recursive) + return col.get(LOCAL_CLUSTER, {}) + + def group_by_cluster(self): + return group_collection_by_cluster(self) @staticmethod def load(): @@ -103,7 +128,7 @@ cdef class Partitions(dict): partition.power_save_enabled = power_save_enabled partition.slurm_conf = slurm_conf - partitions[partition.name] = partition + partitions.append(partition) # At this point we memcpy'd all the memory for the Partitions. Setting # this to 0 will prevent the slurm partition free function to @@ -129,17 +154,17 @@ cdef class Partitions(dict): Raises: RPCError: When getting the Partitions from the slurmctld failed. """ - cdef Partitions reloaded_parts - our_parts = list(self.keys()) + cdef dict reloaded_parts - if not our_parts: + if not self: return self - reloaded_parts = Partitions.load() - for part in our_parts: - if part in reloaded_parts: + reloaded_parts = Partitions.load().as_dict() + for idx, part in enumerate(self): + part_name = part.name + if part_name in reloaded_parts: # Put the new data in. - self[part] = reloaded_parts[part] + self[idx] = reloaded_parts[part_name] return self @@ -164,17 +189,9 @@ cdef class Partitions(dict): >>> # Apply the changes to all the partitions >>> parts.modify(changes) """ - for part in self.values(): + for part in self: part.modify(changes) - def as_list(self): - """Format the information as list of Partition objects. 
- - Returns: - (list): List of Partition objects - """ - return list(self.values()) - @property def total_cpus(self): return _sum_prop(self, Partition.total_cpus) @@ -192,6 +209,7 @@ cdef class Partition: def __init__(self, name=None, **kwargs): self._alloc_impl() self.name = name + self.cluster = LOCAL_CLUSTER for k, v in kwargs.items(): setattr(self, k, v) @@ -214,6 +232,7 @@ cdef class Partition: cdef Partition from_ptr(partition_info_t *in_ptr): cdef Partition wrap = Partition.__new__(Partition) wrap._alloc_impl() + wrap.cluster = LOCAL_CLUSTER memcpy(wrap.ptr, in_ptr, sizeof(partition_info_t)) return wrap @@ -255,7 +274,7 @@ cdef class Partition: >>> import pyslurm >>> part = pyslurm.Partition.load("normal") """ - partitions = Partitions.load() + partitions = Partitions.load().as_dict() if name not in partitions: raise RPCError(msg=f"Partition '{name}' doesn't exist") diff --git a/pyslurm/db/__init__.py b/pyslurm/db/__init__.py index bb34e232..0e78a734 100644 --- a/pyslurm/db/__init__.py +++ b/pyslurm/db/__init__.py @@ -25,6 +25,7 @@ from .job import ( Job, Jobs, + JobFilter, JobSearchFilter, ) from .tres import ( @@ -34,5 +35,11 @@ from .qos import ( QualitiesOfService, QualityOfService, - QualityOfServiceSearchFilter, + QualityOfServiceFilter, ) +from .assoc import ( + Associations, + Association, + AssociationFilter, +) +from . 
import cluster diff --git a/pyslurm/db/assoc.pxd b/pyslurm/db/assoc.pxd new file mode 100644 index 00000000..12a0cde1 --- /dev/null +++ b/pyslurm/db/assoc.pxd @@ -0,0 +1,87 @@ +######################################################################### +# assoc.pxd - pyslurm slurmdbd association api +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm cimport slurm +from pyslurm.slurm cimport ( + slurmdb_assoc_rec_t, + slurmdb_assoc_cond_t, + slurmdb_associations_get, + slurmdb_destroy_assoc_rec, + slurmdb_destroy_assoc_cond, + slurmdb_init_assoc_rec, + slurmdb_associations_modify, + try_xmalloc, +) +from pyslurm.db.util cimport ( + SlurmList, + SlurmListItem, + make_char_list, + slurm_list_to_pylist, + qos_list_to_pylist, +) +from pyslurm.db.tres cimport ( + _set_tres_limits, + TrackableResources, + TrackableResourceLimits, +) +from pyslurm.db.connection cimport Connection +from pyslurm.utils cimport cstr +from pyslurm.utils.uint cimport * +from pyslurm.db.qos cimport QualitiesOfService, _set_qos_list + +cdef _parse_assoc_ptr(Association ass) +cdef _create_assoc_ptr(Association ass, conn=*) + + +cdef class Associations(list): + pass + + +cdef class AssociationFilter: + cdef slurmdb_assoc_cond_t *ptr + + cdef public: + users + ids + + +cdef class Association: + cdef: + slurmdb_assoc_rec_t *ptr + dict qos_data + dict tres_data + + cdef public: + group_tres + group_tres_mins + group_tres_run_mins + max_tres_mins_per_job + max_tres_run_mins_per_user + max_tres_per_job + max_tres_per_node + qos + + @staticmethod + cdef Association from_ptr(slurmdb_assoc_rec_t *in_ptr) + diff --git a/pyslurm/db/assoc.pyx b/pyslurm/db/assoc.pyx new file mode 100644 index 00000000..d1ac4789 --- /dev/null +++ b/pyslurm/db/assoc.pyx @@ -0,0 +1,455 @@ +######################################################################### +# assoc.pyx - pyslurm slurmdbd association api +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at 
your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm.core.error import RPCError +from pyslurm.utils.helpers import ( + instance_to_dict, + collection_to_dict, + group_collection_by_cluster, + user_to_uid, +) +from pyslurm.utils.uint import * +from pyslurm.db.connection import _open_conn_or_error +from pyslurm.db.cluster import LOCAL_CLUSTER + + +cdef class Associations(list): + + def __init__(self): + pass + + def as_dict(self, recursive=False, group_by_cluster=False): + """Convert the collection data to a dict. + + Args: + recursive (bool, optional): + By default, the objects will not be converted to a dict. If + this is set to `True`, then additionally all objects are + converted to dicts. + group_by_cluster (bool, optional): + By default, only the Jobs from your local Cluster are + returned. If this is set to `True`, then all the Jobs in the + collection will be grouped by the Cluster - with the name of + the cluster as the key and the value being the collection as + another dict. + + Returns: + (dict): Collection as a dict. 
+ """ + col = collection_to_dict(self, identifier=Association.id, + recursive=recursive) + if not group_by_cluster: + return col.get(LOCAL_CLUSTER, {}) + + return col + + def group_by_cluster(self): + return group_collection_by_cluster(self) + + @staticmethod + def load(AssociationFilter db_filter=None, Connection db_connection=None): + cdef: + Associations out = Associations() + Association assoc + AssociationFilter cond = db_filter + SlurmList assoc_data + SlurmListItem assoc_ptr + Connection conn + dict qos_data + dict tres_data + + # Prepare SQL Filter + if not db_filter: + cond = AssociationFilter() + cond._create() + + # Setup DB Conn + conn = _open_conn_or_error(db_connection) + + # Fetch Assoc Data + assoc_data = SlurmList.wrap(slurmdb_associations_get( + conn.ptr, cond.ptr)) + + if assoc_data.is_null: + raise RPCError(msg="Failed to get Association data from slurmdbd") + + # Fetch other necessary dependencies needed for translating some + # attributes (i.e QoS IDs to its name) + qos_data = QualitiesOfService.load(db_connection=conn).as_dict( + name_is_key=False) + tres_data = TrackableResources.load(db_connection=conn).as_dict( + name_is_key=False) + + # Setup Association objects + for assoc_ptr in SlurmList.iter_and_pop(assoc_data): + assoc = Association.from_ptr(assoc_ptr.data) + assoc.qos_data = qos_data + assoc.tres_data = tres_data + _parse_assoc_ptr(assoc) + out.append(assoc) + + return out + + @staticmethod + def modify(db_filter, Association changes, Connection db_connection=None): + cdef: + AssociationFilter afilter + Connection conn + SlurmList response + SlurmListItem response_ptr + list out = [] + + # Prepare SQL Filter + if isinstance(db_filter, Associations): + assoc_ids = [ass.id for ass in db_filter] + afilter = AssociationFilter(ids=assoc_ids) + else: + afilter = db_filter + afilter._create() + + # Setup DB conn + conn = _open_conn_or_error(db_connection) + + # Any data that isn't parsed yet or needs validation is done in this + # 
function. + _create_assoc_ptr(changes, conn) + + # Modify associations, get the result + # This returns a List of char* with the associations that were + # modified + response = SlurmList.wrap(slurmdb_associations_modify( + conn.ptr, afilter.ptr, changes.ptr)) + + if not response.is_null and response.cnt: + for response_ptr in response: + response_str = cstr.to_unicode(response_ptr.data) + if not response_str: + continue + + # TODO: Better format + out.append(response_str) + + elif not response.is_null: + # There was no real error, but simply nothing has been modified + raise RPCError(msg="Nothing was modified") + else: + # Autodetects the last slurm error + raise RPCError() + + if not db_connection: + # Autocommit if no connection was explicitly specified. + conn.commit() + + return out + + +cdef class AssociationFilter: + + def __cinit__(self): + self.ptr = NULL + + def __init__(self, **kwargs): + for k, v in kwargs.items(): + setattr(self, k, v) + + def __dealloc__(self): + self._dealloc() + + def _dealloc(self): + slurmdb_destroy_assoc_cond(self.ptr) + self.ptr = NULL + + def _alloc(self): + self._dealloc() + self.ptr = try_xmalloc(sizeof(slurmdb_assoc_cond_t)) + if not self.ptr: + raise MemoryError("xmalloc failed for slurmdb_assoc_cond_t") + + def _parse_users(self): + if not self.users: + return None + return list({user_to_uid(user) for user in self.users}) + + def _create(self): + self._alloc() + cdef slurmdb_assoc_cond_t *ptr = self.ptr + + make_char_list(&ptr.user_list, self.users) + + +cdef class Association: + + def __cinit__(self): + self.ptr = NULL + + def __init__(self, **kwargs): + self._alloc_impl() + self.id = 0 + self.cluster = LOCAL_CLUSTER + for k, v in kwargs.items(): + setattr(self, k, v) + + def __dealloc__(self): + self._dealloc_impl() + + def _dealloc_impl(self): + slurmdb_destroy_assoc_rec(self.ptr) + self.ptr = NULL + + def _alloc_impl(self): + if not self.ptr: + self.ptr = try_xmalloc( + sizeof(slurmdb_assoc_rec_t)) + if not self.ptr: + 
raise MemoryError("xmalloc failed for slurmdb_assoc_rec_t") + + slurmdb_init_assoc_rec(self.ptr, 0) + + @staticmethod + cdef Association from_ptr(slurmdb_assoc_rec_t *in_ptr): + cdef Association wrap = Association.__new__(Association) + wrap.ptr = in_ptr + return wrap + + def as_dict(self): + """Database Association information formatted as a dictionary. + + Returns: + (dict): Database Association information as dict + """ + return instance_to_dict(self) + + def __eq__(self, other): + if isinstance(other, Association): + return self.id == other.id and self.cluster == other.cluster + return NotImplemented + + @property + def account(self): + return cstr.to_unicode(self.ptr.acct) + + @account.setter + def account(self, val): + cstr.fmalloc(&self.ptr.acct, val) + + @property + def cluster(self): + return cstr.to_unicode(self.ptr.cluster) + + @cluster.setter + def cluster(self, val): + cstr.fmalloc(&self.ptr.cluster, val) + + @property + def comment(self): + return cstr.to_unicode(self.ptr.comment) + + @comment.setter + def comment(self, val): + cstr.fmalloc(&self.ptr.comment, val) + + # uint32_t def_qos_id + + # uint16_t flags (ASSOC_FLAG_*) + + @property + def group_jobs(self): + return u32_parse(self.ptr.grp_jobs, zero_is_noval=False) + + @group_jobs.setter + def group_jobs(self, val): + self.ptr.grp_jobs = u32(val, zero_is_noval=False) + + @property + def group_jobs_accrue(self): + return u32_parse(self.ptr.grp_jobs_accrue, zero_is_noval=False) + + @group_jobs_accrue.setter + def group_jobs_accrue(self, val): + self.ptr.grp_jobs_accrue = u32(val, zero_is_noval=False) + + @property + def group_submit_jobs(self): + return u32_parse(self.ptr.grp_submit_jobs, zero_is_noval=False) + + @group_submit_jobs.setter + def group_submit_jobs(self, val): + self.ptr.grp_submit_jobs = u32(val, zero_is_noval=False) + + @property + def group_wall_time(self): + return u32_parse(self.ptr.grp_wall, zero_is_noval=False) + + @group_wall_time.setter + def group_wall_time(self, val): + 
self.ptr.grp_wall = u32(val, zero_is_noval=False) + + @property + def id(self): + return u32_parse(self.ptr.id) + + @id.setter + def id(self, val): + self.ptr.id = val + + @property + def is_default(self): + return u16_parse_bool(self.ptr.is_def) + + @property + def lft(self): + return u32_parse(self.ptr.lft) + + @property + def max_jobs(self): + return u32_parse(self.ptr.max_jobs, zero_is_noval=False) + + @max_jobs.setter + def max_jobs(self, val): + self.ptr.max_jobs = u32(val, zero_is_noval=False) + + @property + def max_jobs_accrue(self): + return u32_parse(self.ptr.max_jobs_accrue, zero_is_noval=False) + + @max_jobs_accrue.setter + def max_jobs_accrue(self, val): + self.ptr.max_jobs_accrue = u32(val, zero_is_noval=False) + + @property + def max_submit_jobs(self): + return u32_parse(self.ptr.max_submit_jobs, zero_is_noval=False) + + @max_submit_jobs.setter + def max_submit_jobs(self, val): + self.ptr.max_submit_jobs = u32(val, zero_is_noval=False) + + @property + def max_wall_time_per_job(self): + return u32_parse(self.ptr.max_wall_pj, zero_is_noval=False) + + @max_wall_time_per_job.setter + def max_wall_time_per_job(self, val): + self.ptr.max_wall_pj = u32(val, zero_is_noval=False) + + @property + def min_priority_threshold(self): + return u32_parse(self.ptr.min_prio_thresh, zero_is_noval=False) + + @min_priority_threshold.setter + def min_priority_threshold(self, val): + self.ptr.min_prio_thresh = u32(val, zero_is_noval=False) + + @property + def parent_account(self): + return cstr.to_unicode(self.ptr.parent_acct) + + @property + def parent_account_id(self): + return u32_parse(self.ptr.parent_id, zero_is_noval=False) + + @property + def partition(self): + return cstr.to_unicode(self.ptr.partition) + + @partition.setter + def partition(self, val): + cstr.fmalloc(&self.ptr.partition, val) + + @property + def priority(self): + return u32_parse(self.ptr.priority, zero_is_noval=False) + + @priority.setter + def priority(self, val): + self.ptr.priority = u32(val) + 
+    @property
+    def rgt(self):
+        return u32_parse(self.ptr.rgt)
+
+    @property
+    def shares(self):
+        return u32_parse(self.ptr.shares_raw, zero_is_noval=False)
+
+    @shares.setter
+    def shares(self, val):
+        self.ptr.shares_raw = u32(val)
+
+    @property
+    def user(self):
+        return cstr.to_unicode(self.ptr.user)
+
+    @user.setter
+    def user(self, val):
+        cstr.fmalloc(&self.ptr.user, val)
+
+
+cdef _parse_assoc_ptr(Association ass):
+    cdef:
+        dict tres = ass.tres_data
+        dict qos = ass.qos_data
+
+    ass.group_tres = TrackableResourceLimits.from_ids(
+        ass.ptr.grp_tres, tres)
+    ass.group_tres_mins = TrackableResourceLimits.from_ids(
+        ass.ptr.grp_tres_mins, tres)
+    ass.group_tres_run_mins = TrackableResourceLimits.from_ids(
+        ass.ptr.grp_tres_run_mins, tres)
+    ass.max_tres_mins_per_job = TrackableResourceLimits.from_ids(
+        ass.ptr.max_tres_mins_pj, tres)
+    ass.max_tres_run_mins_per_user = TrackableResourceLimits.from_ids(
+        ass.ptr.max_tres_run_mins, tres)
+    ass.max_tres_per_job = TrackableResourceLimits.from_ids(
+        ass.ptr.max_tres_pj, tres)
+    ass.max_tres_per_node = TrackableResourceLimits.from_ids(
+        ass.ptr.max_tres_pn, tres)
+    ass.qos = qos_list_to_pylist(ass.ptr.qos_list, qos)
+
+
+cdef _create_assoc_ptr(Association ass, conn=None):
+    # _set_tres_limits will also check if specified TRES are valid and
+    # translate them to its ID which is why we need to load the current TRES
+    # available in the system.
+    ass.tres_data = TrackableResources.load(db_connection=conn)
+    _set_tres_limits(&ass.ptr.grp_tres, ass.group_tres, ass.tres_data)
+    _set_tres_limits(&ass.ptr.grp_tres_mins, ass.group_tres_mins,
+                     ass.tres_data)
+    _set_tres_limits(&ass.ptr.grp_tres_run_mins, ass.group_tres_run_mins,
+                     ass.tres_data)
+    _set_tres_limits(&ass.ptr.max_tres_mins_pj, ass.max_tres_mins_per_job,
+                     ass.tres_data)
+    _set_tres_limits(&ass.ptr.max_tres_run_mins, ass.max_tres_run_mins_per_user,
+                     ass.tres_data)
+    _set_tres_limits(&ass.ptr.max_tres_pj, ass.max_tres_per_job,
+                     ass.tres_data)
+    _set_tres_limits(&ass.ptr.max_tres_pn, ass.max_tres_per_node,
+                     ass.tres_data)
+
+    # _set_qos_list will also check if specified QoS are valid and translate
+    # them to its ID, which is why we need to load the current QOS available
+    # in the system.
+    ass.qos_data = QualitiesOfService.load(db_connection=conn)
+    _set_qos_list(&ass.ptr.qos_list, ass.qos, ass.qos_data)
+
diff --git a/pyslurm/db/cluster.pxd b/pyslurm/db/cluster.pxd
new file mode 100644
index 00000000..30acdbde
--- /dev/null
+++ b/pyslurm/db/cluster.pxd
@@ -0,0 +1,27 @@
+#########################################################################
+# cluster.pxd - pyslurm slurmdbd cluster api
+#########################################################################
+# Copyright (C) 2023 Toni Harzendorf
+#
+# This file is part of PySlurm
+#
+# PySlurm is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# PySlurm is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + + +from pyslurm cimport slurm +from pyslurm.utils cimport cstr diff --git a/pyslurm/db/cluster.pyx b/pyslurm/db/cluster.pyx new file mode 100644 index 00000000..436183a8 --- /dev/null +++ b/pyslurm/db/cluster.pyx @@ -0,0 +1,31 @@ +######################################################################### +# cluster.pyx - pyslurm slurmdbd cluster api +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + +from pyslurm.core import slurmctld + + +LOCAL_CLUSTER = cstr.to_unicode(slurm.slurm_conf.cluster_name) +if not LOCAL_CLUSTER: + slurm_conf = slurmctld.Config.load() + LOCAL_CLUSTER = slurm_conf.cluster diff --git a/pyslurm/db/connection.pyx b/pyslurm/db/connection.pyx index eab6572d..67ef7603 100644 --- a/pyslurm/db/connection.pyx +++ b/pyslurm/db/connection.pyx @@ -25,6 +25,16 @@ from pyslurm.core.error import RPCError +def _open_conn_or_error(conn): + if not conn: + conn = Connection.open() + + if not conn.is_open: + raise ValueError("Database connection is not open") + + return conn + + cdef class Connection: def __cinit__(self): diff --git a/pyslurm/db/job.pxd b/pyslurm/db/job.pxd index 0faac2fd..fc395943 100644 --- a/pyslurm/db/job.pxd +++ b/pyslurm/db/job.pxd @@ -55,7 +55,7 @@ from pyslurm.db.qos cimport QualitiesOfService from pyslurm.db.tres cimport TrackableResources, TrackableResource -cdef class JobSearchFilter: +cdef class JobFilter: """Query-Conditions for Jobs in the Slurm Database. 
Args: @@ -150,11 +150,9 @@ cdef class JobSearchFilter: with_env -cdef class Jobs(dict): +cdef class Jobs(list): """A collection of [pyslurm.db.Job][] objects.""" - cdef: - SlurmList info - Connection db_conn + pass cdef class Job: @@ -285,7 +283,7 @@ cdef class Job: """ cdef: slurmdb_job_rec_t *ptr - QualitiesOfService qos_data + dict qos_data cdef public: JobSteps steps diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index af86f704..636e1137 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -27,6 +27,7 @@ from pyslurm.core.error import RPCError, PyslurmError from pyslurm.core import slurmctld from typing import Any from pyslurm.utils.uint import * +from pyslurm.db.cluster import LOCAL_CLUSTER from pyslurm.utils.ctime import ( date_to_timestamp, timestr_to_mins, @@ -39,11 +40,14 @@ from pyslurm.utils.helpers import ( uid_to_name, nodelist_to_range_str, instance_to_dict, + collection_to_dict, + group_collection_by_cluster, _get_exit_code, ) +from pyslurm.db.connection import _open_conn_or_error -cdef class JobSearchFilter: +cdef class JobFilter: def __cinit__(self): self.ptr = NULL @@ -73,14 +77,19 @@ cdef class JobSearchFilter: return None qos_id_list = [] - qos = QualitiesOfService.load() - for q in self.qos: - if isinstance(q, int): - qos_id_list.append(q) - elif q in qos: - qos_id_list.append(str(qos[q].id)) - else: - raise ValueError(f"QoS {q} does not exist") + qos_data = QualitiesOfService.load() + for user_input in self.qos: + found = False + for qos in qos_data: + if (qos.id == user_input + or qos.name == user_input + or qos == user_input): + qos_id_list.append(str(qos.id)) + found = True + break + + if not found: + raise ValueError(f"QoS '{user_input}' does not exist") return qos_id_list @@ -96,11 +105,9 @@ cdef class JobSearchFilter: def _parse_clusters(self): if not self.clusters: - # Get the local cluster name # This is a requirement for some other parameters to function # correctly, like self.nodelist - slurm_conf = 
slurmctld.Config.load()
-            return [slurm_conf.cluster]
+            return [LOCAL_CLUSTER]
         elif self.clusters == "all":
             return None
         else:
@@ -178,31 +185,71 @@ cdef class JobSearchFilter:
         slurmdb_job_cond_def_start_end(ptr)
 
 
-cdef class Jobs(dict):
+# Alias
+JobSearchFilter = JobFilter
+
+
+cdef class Jobs(list):
 
     def __init__(self, jobs=None):
-        if isinstance(jobs, dict):
-            self.update(jobs)
-        elif isinstance(jobs, str):
-            joblist = jobs.split(",")
-            self.update({int(job): Job(job) for job in joblist})
-        elif jobs is not None:
+        if isinstance(jobs, list):
             for job in jobs:
                 if isinstance(job, int):
-                    self[job] = Job(job)
+                    self.append(Job(job))
                 else:
-                    self[job.name] = job
+                    self.append(job)
+        elif isinstance(jobs, str):
+            joblist = jobs.split(",")
+            self.extend([Job(job) for job in joblist])
+        elif isinstance(jobs, dict):
+            self.extend([job for job in jobs.values()])
+        elif jobs is not None:
+            raise TypeError(f"Invalid Type: {type(jobs)}")
+
+    def as_dict(self, recursive=False, group_by_cluster=False):
+        """Convert the collection data to a dict.
+
+        Args:
+            recursive (bool, optional):
+                By default, the objects will not be converted to a dict. If
+                this is set to `True`, then additionally all objects are
+                converted to dicts.
+            group_by_cluster (bool, optional):
+                By default, only the Jobs from your local Cluster are
+                returned. If this is set to `True`, then all the Jobs in the
+                collection will be grouped by the Cluster - with the name of
+                the cluster as the key and the value being the collection as
+                another dict.
+
+        Returns:
+            (dict): Collection as a dict.
+ """ + return group_collection_by_cluster(self) @staticmethod - def load(search_filter=None): + def load(JobFilter db_filter=None, Connection db_connection=None): """Load Jobs from the Slurm Database Implements the slurmdb_jobs_get RPC. Args: - search_filter (pyslurm.db.JobSearchFilter): + db_filter (pyslurm.db.JobFilter): A search filter that the slurmdbd will apply when retrieving Jobs from the database. + db_connection (pyslurm.db.Connection): + An open database connection. Returns: (pyslurm.db.Jobs): A Collection of database Jobs. @@ -223,30 +270,35 @@ cdef class Jobs(dict): >>> import pyslurm >>> accounts = ["acc1", "acc2"] - >>> search_filter = pyslurm.db.JobSearchFilter(accounts=accounts) - >>> db_jobs = pyslurm.db.Jobs.load(search_filter) + >>> db_filter = pyslurm.db.JobFilter(accounts=accounts) + >>> db_jobs = pyslurm.db.Jobs.load(db_filter) """ cdef: - Jobs jobs = Jobs() + Jobs out = Jobs() Job job - JobSearchFilter cond + JobFilter cond = db_filter + SlurmList job_data SlurmListItem job_ptr - QualitiesOfService qos_data - - if search_filter: - cond = search_filter - else: - cond = JobSearchFilter() + Connection conn + dict qos_data + # Prepare SQL Filter + if not db_filter: + cond = JobFilter() cond._create() - jobs.db_conn = Connection.open() - jobs.info = SlurmList.wrap(slurmdb_jobs_get(jobs.db_conn.ptr, - cond.ptr)) - if jobs.info.is_null: + + # Setup DB Conn + conn = _open_conn_or_error(db_connection) + + # Fetch Job data + job_data = SlurmList.wrap(slurmdb_jobs_get(conn.ptr, cond.ptr)) + if job_data.is_null: raise RPCError(msg="Failed to get Jobs from slurmdbd") - qos_data = QualitiesOfService.load(name_is_key=False, - db_connection=jobs.db_conn) + # Fetch other necessary dependencies needed for translating some + # attributes (i.e QoS IDs to its name) + qos_data = QualitiesOfService.load(db_connection=conn).as_dict( + name_is_key=False) # TODO: also get trackable resources with slurmdb_tres_get and store # it in each job instance. 
tres_alloc_str and tres_req_str only @@ -256,23 +308,23 @@ cdef class Jobs(dict): # TODO: For multi-cluster support, remove duplicate federation jobs # TODO: How to handle the possibility of duplicate job ids that could # appear if IDs on a cluster are resetted? - for job_ptr in SlurmList.iter_and_pop(jobs.info): + for job_ptr in SlurmList.iter_and_pop(job_data): job = Job.from_ptr(job_ptr.data) job.qos_data = qos_data job._create_steps() JobStatistics._sum_step_stats_for_job(job, job.steps) - jobs[job.id] = job + out.append(job) - return jobs + return out @staticmethod - def modify(search_filter, Job changes, db_connection=None): + def modify(db_filter, Job changes, db_connection=None): """Modify Slurm database Jobs. Implements the slurm_job_modify RPC. Args: - search_filter (Union[pyslurm.db.JobSearchFilter, pyslurm.db.Jobs]): + db_filter (Union[pyslurm.db.JobFilter, pyslurm.db.Jobs]): A filter to decide which Jobs should be modified. changes (pyslurm.db.Job): Another [pyslurm.db.Job][] object that contains all the @@ -307,9 +359,9 @@ cdef class Jobs(dict): >>> import pyslurm >>> - >>> search_filter = pyslurm.db.JobSearchFilter(ids=[9999]) + >>> db_filter = pyslurm.db.JobFilter(ids=[9999]) >>> changes = pyslurm.db.Job(comment="A comment for the job") - >>> modified_jobs = pyslurm.db.Jobs.modify(search_filter, changes) + >>> modified_jobs = pyslurm.db.Jobs.modify(db_filter, changes) >>> print(modified_jobs) >>> [9999] @@ -321,10 +373,10 @@ cdef class Jobs(dict): >>> import pyslurm >>> >>> db_conn = pyslurm.db.Connection.open() - >>> search_filter = pyslurm.db.JobSearchFilter(ids=[9999]) + >>> db_filter = pyslurm.db.JobFilter(ids=[9999]) >>> changes = pyslurm.db.Job(comment="A comment for the job") >>> modified_jobs = pyslurm.db.Jobs.modify( - ... search_filter, changes, db_conn) + ... 
db_filter, changes, db_conn)
         >>>
         >>> # Now you can first examine which Jobs have been modified
         >>> print(modified_jobs)
@@ -333,28 +385,29 @@
         >>> # changes
         >>> db_conn.commit()
         """
         cdef:
-            Job job = changes
-            JobSearchFilter jfilter
-            Connection conn = db_connection
+            JobFilter cond
+            Connection conn
             SlurmList response
             SlurmListItem response_ptr
             list out = []
 
-        conn = Connection.open() if not conn else conn
-        if not conn.is_open:
-            raise ValueError("Database connection is not open")
-
-        if isinstance(search_filter, Jobs):
-            job_ids = list(search_filter.keys())
-            jfilter = JobSearchFilter(ids=job_ids)
+        # Prepare SQL Filter
+        if isinstance(db_filter, Jobs):
+            job_ids = [job.id for job in db_filter]
+            cond = JobFilter(ids=job_ids)
         else:
-            jfilter = search_filter
+            cond = db_filter
+        cond._create()
+
+        # Setup DB Conn
+        conn = _open_conn_or_error(db_connection)
 
-        jfilter._create()
+        # Modify Jobs, get the result
+        # This returns a List of char* with the Jobs ids that were
+        # modified
         response = SlurmList.wrap(
-            slurmdb_job_modify(conn.ptr, jfilter.ptr, job.ptr))
+            slurmdb_job_modify(conn.ptr, cond.ptr, changes.ptr))
 
         if not response.is_null and response.cnt:
             for response_ptr in response:
@@ -391,9 +444,10 @@ cdef class Job:
 
     def __cinit__(self):
         self.ptr = NULL
 
-    def __init__(self, job_id=0, **kwargs):
+    def __init__(self, job_id=0, cluster=LOCAL_CLUSTER, **kwargs):
         self._alloc_impl()
         self.ptr.jobid = int(job_id)
+        cstr.fmalloc(&self.ptr.cluster, cluster)
         for k, v in kwargs.items():
             setattr(self, k, v)
@@ -417,7 +471,7 @@ cdef class Job:
         return wrap
 
     @staticmethod
-    def load(job_id, with_script=False, with_env=False):
+    def load(job_id, cluster=LOCAL_CLUSTER, with_script=False, with_env=False):
         """Load the information for a specific Job from the Database.
Args: @@ -444,13 +498,15 @@ cdef class Job: >>> print(db_job.script) """ - jfilter = JobSearchFilter(ids=[int(job_id)], - with_script=with_script, with_env=with_env) + jfilter = JobFilter(ids=[int(job_id)], clusters=[cluster], + with_script=with_script, with_env=with_env) jobs = Jobs.load(jfilter) - if not jobs or job_id not in jobs: - raise RPCError(msg=f"Job {job_id} does not exist") + if not jobs: + raise RPCError(msg=f"Job {job_id} does not exist on " + f"Cluster {cluster}") - return jobs[job_id] + # TODO: There might be multiple entries when job ids were reset. + return jobs[0] def _create_steps(self): cdef: @@ -503,7 +559,7 @@ cdef class Job: Raises: RPCError: When modifying the Job failed. """ - cdef JobSearchFilter jfilter = JobSearchFilter(ids=[self.id]) + cdef JobFilter jfilter = JobFilter(ids=[self.id]) Jobs.modify(jfilter, changes, db_connection) @property diff --git a/pyslurm/db/qos.pxd b/pyslurm/db/qos.pxd index b2b0bcf9..9cb3df86 100644 --- a/pyslurm/db/qos.pxd +++ b/pyslurm/db/qos.pxd @@ -30,6 +30,7 @@ from pyslurm.slurm cimport ( slurmdb_destroy_qos_cond, slurmdb_qos_get, slurm_preempt_mode_num, + List, try_xmalloc, ) from pyslurm.db.util cimport ( @@ -40,14 +41,14 @@ from pyslurm.db.util cimport ( from pyslurm.db.connection cimport Connection from pyslurm.utils cimport cstr +cdef _set_qos_list(List *in_list, vals, QualitiesOfService data) -cdef class QualitiesOfService(dict): - cdef: - SlurmList info - Connection db_conn +cdef class QualitiesOfService(list): + pass -cdef class QualityOfServiceSearchFilter: + +cdef class QualityOfServiceFilter: cdef slurmdb_qos_cond_t *ptr cdef public: diff --git a/pyslurm/db/qos.pyx b/pyslurm/db/qos.pyx index 2851587e..a01ef9b0 100644 --- a/pyslurm/db/qos.pyx +++ b/pyslurm/db/qos.pyx @@ -23,46 +23,72 @@ # cython: language_level=3 from pyslurm.core.error import RPCError -from pyslurm.utils.helpers import instance_to_dict +from pyslurm.utils.helpers import instance_to_dict, collection_to_dict_global +from 
pyslurm.db.connection import _open_conn_or_error -cdef class QualitiesOfService(dict): +cdef class QualitiesOfService(list): def __init__(self): pass + def as_dict(self, recursive=False, name_is_key=True): + """Convert the collection data to a dict. + + Args: + recursive (bool, optional): + By default, the objects will not be converted to a dict. If + this is set to `True`, then additionally all objects are + converted to dicts. + name_is_key (bool, optional): + By default, the keys in this dict are the names of each QoS. + If this is set to `False`, then the unique ID of the QoS will + be used as dict keys. + + Returns: + (dict): Collection as a dict. + """ + identifier = QualityOfService.name + if not name_is_key: + identifier = QualityOfService.id + + return collection_to_dict_global(self, identifier=identifier, + recursive=recursive) + @staticmethod - def load(search_filter=None, name_is_key=True, db_connection=None): + def load(QualityOfServiceFilter db_filter=None, + Connection db_connection=None): cdef: - QualitiesOfService qos_dict = QualitiesOfService() + QualitiesOfService out = QualitiesOfService() QualityOfService qos - QualityOfServiceSearchFilter cond + QualityOfServiceFilter cond = db_filter + SlurmList qos_data SlurmListItem qos_ptr - Connection conn = db_connection - - if search_filter: - cond = search_filter - else: - cond = QualityOfServiceSearchFilter() + Connection conn + # Prepare SQL Filter + if not db_filter: + cond = QualityOfServiceFilter() cond._create() - qos_dict.db_conn = Connection.open() if not conn else conn - qos_dict.info = SlurmList.wrap(slurmdb_qos_get(qos_dict.db_conn.ptr, - cond.ptr)) - if qos_dict.info.is_null: + + # Setup DB Conn + conn = _open_conn_or_error(db_connection) + + # Fetch QoS Data + qos_data = SlurmList.wrap(slurmdb_qos_get(conn.ptr, cond.ptr)) + + if qos_data.is_null: raise RPCError(msg="Failed to get QoS data from slurmdbd") - for qos_ptr in SlurmList.iter_and_pop(qos_dict.info): + # Setup QOS objects + for 
qos_ptr in SlurmList.iter_and_pop(qos_data): qos = QualityOfService.from_ptr(qos_ptr.data) - if name_is_key: - qos_dict[qos.name] = qos - else: - qos_dict[qos.id] = qos + out.append(qos) - return qos_dict + return out -cdef class QualityOfServiceSearchFilter: +cdef class QualityOfServiceFilter: def __cinit__(self): self.ptr = NULL @@ -168,12 +194,12 @@ cdef class QualityOfService: RPCError: If requesting the information from the database was not sucessful. """ - qfilter = QualityOfServiceSearchFilter(names=[name]) + qfilter = QualityOfServiceFilter(names=[name]) qos_data = QualitiesOfService.load(qfilter) - if not qos_data or name not in qos_data: + if not qos_data: raise RPCError(msg=f"QualityOfService {name} does not exist") - return qos_data[name] + return qos_data[0] @property def name(self): @@ -190,3 +216,24 @@ cdef class QualityOfService: @property def id(self): return self.ptr.id + + +def _qos_names_to_ids(qos_list, QualitiesOfService data): + cdef list out = [] + if not qos_list: + return None + + return [_validate_qos_single(qid, data) for qid in qos_list] + + +def _validate_qos_single(qid, QualitiesOfService data): + for item in data: + if qid == item.id or qid == item.name: + return item.id + + raise ValueError(f"Invalid QOS specified: {qid}") + + +cdef _set_qos_list(List *in_list, vals, QualitiesOfService data): + qos_ids = _qos_names_to_ids(vals, data) + make_char_list(in_list, qos_ids) diff --git a/pyslurm/db/step.pyx b/pyslurm/db/step.pyx index 22a46fa8..fa4ab8bb 100644 --- a/pyslurm/db/step.pyx +++ b/pyslurm/db/step.pyx @@ -32,9 +32,9 @@ from pyslurm.utils.helpers import ( uid_to_name, instance_to_dict, _get_exit_code, + humanize_step_id, ) from pyslurm.core.job.util import cpu_freq_int_to_str -from pyslurm.core.job.step import humanize_step_id cdef class JobStep: diff --git a/pyslurm/db/tres.pxd b/pyslurm/db/tres.pxd index 40d28799..41ed1b4d 100644 --- a/pyslurm/db/tres.pxd +++ b/pyslurm/db/tres.pxd @@ -25,18 +25,59 @@ from pyslurm.utils cimport 
cstr from libc.stdint cimport uint64_t from pyslurm.slurm cimport ( slurmdb_tres_rec_t, + slurmdb_tres_cond_t, + slurmdb_destroy_tres_cond, + slurmdb_init_tres_cond, slurmdb_destroy_tres_rec, slurmdb_find_tres_count_in_string, + slurmdb_tres_get, try_xmalloc, ) +from pyslurm.db.util cimport ( + SlurmList, + SlurmListItem, +) +from pyslurm.db.connection cimport Connection + +cdef find_tres_count(char *tres_str, typ, on_noval=*, on_inf=*) +cdef find_tres_limit(char *tres_str, typ) +cdef merge_tres_str(char **tres_str, typ, val) +cdef _tres_ids_to_names(char *tres_str, dict tres_data) +cdef _set_tres_limits(char **dest, TrackableResourceLimits src, + TrackableResources tres_data) + +cdef class TrackableResourceLimits: + + cdef public: + cpu + mem + energy + node + billing + fs + vmem + pages + gres + license + + @staticmethod + cdef from_ids(char *tres_id_str, dict tres_data) -cdef class TrackableResources(dict): + +cdef class TrackableResourceFilter: + cdef slurmdb_tres_cond_t *ptr + + +cdef class TrackableResources(list): cdef public raw_str @staticmethod cdef TrackableResources from_str(char *tres_str) + @staticmethod + cdef find_count_in_str(char *tres_str, typ, on_noval=*, on_inf=*) + cdef class TrackableResource: cdef slurmdb_tres_rec_t *ptr diff --git a/pyslurm/db/tres.pyx b/pyslurm/db/tres.pyx index f4e84130..df93dda0 100644 --- a/pyslurm/db/tres.pyx +++ b/pyslurm/db/tres.pyx @@ -23,13 +23,175 @@ # cython: language_level=3 from pyslurm.utils.uint import * +from pyslurm.constants import UNLIMITED +from pyslurm.core.error import RPCError +from pyslurm.utils.helpers import instance_to_dict, collection_to_dict_global +from pyslurm.utils import cstr +from pyslurm.db.connection import _open_conn_or_error +import json -cdef class TrackableResources(dict): +TRES_TYPE_DELIM = "/" + + +cdef class TrackableResourceLimits: + + def __init__(self, **kwargs): + self.fs = {} + self.gres = {} + self.license = {} + + for k, v in kwargs.items(): + if TRES_TYPE_DELIM in k: + typ, 
name = self._unflatten_tres(k) + cur_val = getattr(self, typ) + + if not isinstance(cur_val, dict): + raise ValueError(f"TRES Type {typ} cannot have a name " + f"({name}). Invalid Value: {typ}/{name}") + + cur_val.update({name : int(v)}) + setattr(self, typ, cur_val) + else: + setattr(self, k, v) + + @staticmethod + cdef from_ids(char *tres_id_str, dict tres_data): + tres_list = _tres_ids_to_names(tres_id_str, tres_data) + if not tres_list: + return None + + cdef TrackableResourceLimits out = TrackableResourceLimits() + + for tres in tres_list: + typ, name, cnt = tres + cur_val = getattr(out, typ, slurm.NO_VAL64) + if cur_val != slurm.NO_VAL64: + if isinstance(cur_val, dict): + cur_val.update({name : cnt}) + setattr(out, typ, cur_val) + else: + setattr(out, typ, cnt) + + return out + + def _validate(self, TrackableResources tres_data): + id_dict = _tres_names_to_ids(self.as_dict(flatten_limits=True), + tres_data) + return id_dict + + def _unflatten_tres(self, type_and_name): + typ, name = type_and_name.split(TRES_TYPE_DELIM, 1) + return typ, name + + def _flatten_tres(self, typ, vals): + cdef dict out = {} + for name, cnt in vals.items(): + out[f"{typ}{TRES_TYPE_DELIM}{name}"] = cnt + + return out + + def as_dict(self, flatten_limits=False): + cdef dict inst_dict = instance_to_dict(self) + + if flatten_limits: + vals = inst_dict.pop("fs") + inst_dict.update(self._flatten_tres("fs", vals)) + + vals = inst_dict.pop("license") + inst_dict.update(self._flatten_tres("license", vals)) + + vals = inst_dict.pop("gres") + inst_dict.update(self._flatten_tres("gres", vals)) + + return inst_dict + + +cdef class TrackableResourceFilter: + + def __cinit__(self): + self.ptr = NULL + + def __init__(self, **kwargs): + for k, v in kwargs.items(): + setattr(self, k, v) + + def __dealloc__(self): + self._dealloc() + + def _dealloc(self): + slurmdb_destroy_tres_cond(self.ptr) + self.ptr = NULL + + def _alloc(self): + self._dealloc() + self.ptr = try_xmalloc(sizeof(slurmdb_tres_cond_t)) 
+ if not self.ptr: + raise MemoryError("xmalloc failed for slurmdb_tres_cond_t") + slurmdb_init_tres_cond(self.ptr, 0) + + def _create(self): + self._alloc() + + +cdef class TrackableResources(list): def __init__(self): pass + def as_dict(self, recursive=False, name_is_key=True): + """Convert the collection data to a dict. + + Args: + recursive (bool, optional): + By default, the objects will not be converted to a dict. If + this is set to `True`, then additionally all objects are + converted to dicts. + name_is_key (bool, optional): + By default, the keys in this dict are the names of each TRES. + If this is set to `False`, then the unique ID of the TRES will + be used as dict keys. + + Returns: + (dict): Collection as a dict. + """ + identifier = TrackableResource.type_and_name + if not name_is_key: + identifier = TrackableResource.id + + return collection_to_dict_global(self, identifier=identifier, + recursive=recursive) + + @staticmethod + def load(Connection db_connection=None): + cdef: + TrackableResources out = TrackableResources() + TrackableResource tres + Connection conn + SlurmList tres_data + SlurmListItem tres_ptr + TrackableResourceFilter db_filter = TrackableResourceFilter() + + # Prepare SQL Filter + db_filter._create() + + # Setup DB Conn + conn = _open_conn_or_error(db_connection) + + # Fetch TRES data + tres_data = SlurmList.wrap(slurmdb_tres_get(conn.ptr, db_filter.ptr)) + + if tres_data.is_null: + raise RPCError(msg="Failed to get TRES data from slurmdbd") + + # Setup TRES objects + for tres_ptr in SlurmList.iter_and_pop(tres_data): + tres = TrackableResource.from_ptr( + tres_ptr.data) + out.append(tres) + + return out + @staticmethod cdef TrackableResources from_str(char *tres_str): cdef: @@ -51,16 +213,8 @@ cdef class TrackableResources(dict): return tres @staticmethod - def find_count_in_str(tres_str, typ): - if not tres_str: - return 0 - - cdef uint64_t tmp - tmp = slurmdb_find_tres_count_in_string(tres_str, typ) - if tmp == 
slurm.INFINITE64 or tmp == slurm.NO_VAL64: - return 0 - else: - return tmp + cdef find_count_in_str(char *tres_str, typ, on_noval=0, on_inf=0): + return find_tres_count(tres_str, typ, on_noval, on_inf) cdef class TrackableResource: @@ -92,6 +246,9 @@ cdef class TrackableResource: wrap.ptr = in_ptr return wrap + def as_dict(self): + return instance_to_dict(self) + @property def id(self): return self.ptr.id @@ -104,9 +261,94 @@ cdef class TrackableResource: def type(self): return cstr.to_unicode(self.ptr.type) + @property + def type_and_name(self): + type_and_name = self.type + if self.name: + type_and_name = f"{type_and_name}{TRES_TYPE_DELIM}{self.name}" + + return type_and_name + @property def count(self): return u64_parse(self.ptr.count) # rec_count # alloc_secs + + +cdef find_tres_count(char *tres_str, typ, on_noval=0, on_inf=0): + if not tres_str: + return on_noval + + cdef uint64_t tmp + tmp = slurmdb_find_tres_count_in_string(tres_str, typ) + if tmp == slurm.INFINITE64: + return on_inf + elif tmp == slurm.NO_VAL64: + return on_noval + else: + return tmp + + +cdef find_tres_limit(char *tres_str, typ): + return find_tres_count(tres_str, typ, on_noval=None, on_inf=UNLIMITED) + + +cdef merge_tres_str(char **tres_str, typ, val): + cdef uint64_t _val = u64(dehumanize(val)) + + current = cstr.to_dict(tres_str[0]) + if _val == slurm.NO_VAL64: + current.pop(typ, None) + else: + current.update({typ : _val}) + + cstr.from_dict(tres_str, current) + + +cdef _tres_ids_to_names(char *tres_str, dict tres_data): + if not tres_str: + return None + + cdef: + dict tdict = cstr.to_dict(tres_str) + list out = [] + + if not tres_data: + return None + + for tid, cnt in tdict.items(): + if isinstance(tid, str) and tid.isdigit(): + _tid = int(tid) + if _tid in tres_data: + out.append( + (tres_data[_tid].type, tres_data[_tid].name, int(cnt)) + ) + + return out + + +def _tres_names_to_ids(dict tres_dict, TrackableResources tres_data): + cdef dict out = {} + if not tres_dict: + return out 
+ + for tid, cnt in tres_dict.items(): + real_id = _validate_tres_single(tid, tres_data) + out[real_id] = cnt + + return out + + +def _validate_tres_single(tid, TrackableResources tres_data): + for tres in tres_data: + if tid == tres.id or tid == tres.type_and_name: + return tres.id + + raise ValueError(f"Invalid TRES specified: {tid}") + + +cdef _set_tres_limits(char **dest, TrackableResourceLimits src, + TrackableResources tres_data): + cstr.from_dict(dest, src._validate(tres_data)) diff --git a/pyslurm/db/util.pxd b/pyslurm/db/util.pxd index 2e9498a6..01951de8 100644 --- a/pyslurm/db/util.pxd +++ b/pyslurm/db/util.pxd @@ -39,6 +39,7 @@ from pyslurm.slurm cimport ( cdef slurm_list_to_pylist(List in_list) cdef make_char_list(List *in_list, vals) +cdef qos_list_to_pylist(List in_list, qos_data) cdef class SlurmListItem: diff --git a/pyslurm/db/util.pyx b/pyslurm/db/util.pyx index 2560c4b0..672886c2 100644 --- a/pyslurm/db/util.pyx +++ b/pyslurm/db/util.pyx @@ -43,6 +43,15 @@ cdef slurm_list_to_pylist(List in_list): return SlurmList.wrap(in_list, owned=False).to_pylist() +cdef qos_list_to_pylist(List in_list, qos_data): + if not in_list: + return [] + + cdef list qos_nums = SlurmList.wrap(in_list, owned=False).to_pylist() + return [qos.name for qos_id, qos in qos_data.items() + if qos_id in qos_nums] + + cdef class SlurmListItem: def __cinit__(self): diff --git a/pyslurm/slurm/extra.pxi b/pyslurm/slurm/extra.pxi index fb922ac5..3557b0b9 100644 --- a/pyslurm/slurm/extra.pxi +++ b/pyslurm/slurm/extra.pxi @@ -165,6 +165,9 @@ ctypedef enum tres_types_t: # Global Environment cdef extern char **environ +# Local slurm config +cdef extern slurm_conf_t slurm_conf + # # Slurm Memory routines # We simply use the macros from xmalloc.h - more convenient @@ -272,6 +275,8 @@ cdef extern char *slurm_hostlist_deranged_string_malloc(hostlist_t hl) cdef extern void slurmdb_job_cond_def_start_end(slurmdb_job_cond_t *job_cond) cdef extern uint64_t slurmdb_find_tres_count_in_string(char 
*tres_str_in, int id) cdef extern slurmdb_job_rec_t *slurmdb_create_job_rec() +cdef extern void slurmdb_init_assoc_rec(slurmdb_assoc_rec_t *assoc, bool free_it) +cdef extern void slurmdb_init_tres_cond(slurmdb_tres_cond_t *tres, bool free_it) # # Slurm Partition functions diff --git a/pyslurm/utils/cstr.pyx b/pyslurm/utils/cstr.pyx index 489d80e8..13795544 100644 --- a/pyslurm/utils/cstr.pyx +++ b/pyslurm/utils/cstr.pyx @@ -186,7 +186,7 @@ def dict_to_str(vals, prepend=None, delim1=",", delim2="="): tmp_dict = validate_str_key_value_format(vals, delim1, delim2) for k, v in tmp_dict.items(): - if ((delim1 in k or delim2 in k) or + if ((delim1 in str(k) or delim2 in str(k)) or delim1 in str(v) or delim2 in str(v)): raise ValueError( f"Key or Value cannot contain either {delim1} or {delim2}. " diff --git a/pyslurm/utils/helpers.pyx b/pyslurm/utils/helpers.pyx index fcfe9965..fb1d2201 100644 --- a/pyslurm/utils/helpers.pyx +++ b/pyslurm/utils/helpers.pyx @@ -341,6 +341,50 @@ def instance_to_dict(inst): return out +def collection_to_dict(collection, identifier, recursive=False, group_id=None): + cdef dict out = {} + + for item in collection: + cluster = item.cluster + if cluster not in out: + out[cluster] = {} + + _id = identifier.__get__(item) + data = item if not recursive else item.as_dict() + + if group_id: + grp_id = group_id.__get__(item) + if grp_id not in out[cluster]: + out[cluster][grp_id] = {} + out[cluster][grp_id].update({_id: data}) + else: + out[cluster][_id] = data + + return out + + +def collection_to_dict_global(collection, identifier, recursive=False): + cdef dict out = {} + for item in collection: + _id = identifier.__get__(item) + out[_id] = item if not recursive else item.as_dict() + return out + + +def group_collection_by_cluster(collection): + cdef dict out = {} + collection_type = type(collection) + + for item in collection: + cluster = item.cluster + if cluster not in out: + out[cluster] = collection_type() + + out[cluster].append(item) + + 
return out + + def _sum_prop(obj, name, startval=0): val = startval for n in obj.values(): @@ -362,3 +406,29 @@ def _get_exit_code(exit_code): exit_state -= 128 return exit_state, sig + + +def humanize_step_id(sid): + if sid == slurm.SLURM_BATCH_SCRIPT: + return "batch" + elif sid == slurm.SLURM_EXTERN_CONT: + return "extern" + elif sid == slurm.SLURM_INTERACTIVE_STEP: + return "interactive" + elif sid == slurm.SLURM_PENDING_STEP: + return "pending" + else: + return sid + + +def dehumanize_step_id(sid): + if sid == "batch": + return slurm.SLURM_BATCH_SCRIPT + elif sid == "extern": + return slurm.SLURM_EXTERN_CONT + elif sid == "interactive": + return slurm.SLURM_INTERACTIVE_STEP + elif sid == "pending": + return slurm.SLURM_PENDING_STEP + else: + return int(sid) diff --git a/tests/integration/test_db_job.py b/tests/integration/test_db_job.py index 36005935..571ec0d2 100644 --- a/tests/integration/test_db_job.py +++ b/tests/integration/test_db_job.py @@ -42,7 +42,7 @@ def test_load_single(submit_job): assert db_job.id == job.id with pytest.raises(pyslurm.RPCError): - pyslurm.db.Job.load(1000) + pyslurm.db.Job.load(0) def test_parse_all(submit_job): @@ -59,7 +59,7 @@ def test_modify(submit_job): job = submit_job() util.wait(5) - jfilter = pyslurm.db.JobSearchFilter(ids=[job.id]) + jfilter = pyslurm.db.JobFilter(ids=[job.id]) changes = pyslurm.db.Job(comment="test comment") pyslurm.db.Jobs.modify(jfilter, changes) @@ -72,7 +72,7 @@ def test_modify_with_existing_conn(submit_job): util.wait(5) conn = pyslurm.db.Connection.open() - jfilter = pyslurm.db.JobSearchFilter(ids=[job.id]) + jfilter = pyslurm.db.JobFilter(ids=[job.id]) changes = pyslurm.db.Job(comment="test comment") pyslurm.db.Jobs.modify(jfilter, changes, conn) diff --git a/tests/integration/test_db_qos.py b/tests/integration/test_db_qos.py index 5bbd69e4..11d9e870 100644 --- a/tests/integration/test_db_qos.py +++ b/tests/integration/test_db_qos.py @@ -50,6 +50,6 @@ def test_load_all(): def 
test_load_with_filter_name(): - qfilter = pyslurm.db.QualityOfServiceSearchFilter(names=["non_existent"]) + qfilter = pyslurm.db.QualityOfServiceFilter(names=["non_existent"]) qos = pyslurm.db.QualitiesOfService.load(qfilter) assert not qos diff --git a/tests/integration/test_job.py b/tests/integration/test_job.py index 15c4bdef..cef42daf 100644 --- a/tests/integration/test_job.py +++ b/tests/integration/test_job.py @@ -150,7 +150,7 @@ def test_get_job_queue(submit_job): # Submit 10 jobs, gather the job_ids in a list job_list = [submit_job() for i in range(10)] - jobs = Jobs.load() + jobs = Jobs.load().as_dict() for job in job_list: # Check to see if all the Jobs we submitted exist assert job.id in jobs diff --git a/tests/integration/test_job_steps.py b/tests/integration/test_job_steps.py index bd17a188..b24409f5 100644 --- a/tests/integration/test_job_steps.py +++ b/tests/integration/test_job_steps.py @@ -102,9 +102,9 @@ def test_collection(submit_job): job = submit_job(script=create_job_script_multi_step()) time.sleep(util.WAIT_SECS_SLURMCTLD) - steps = JobSteps.load(job) + steps = JobSteps.load(job).as_dict() - assert steps != {} + assert steps # We have 3 Steps: batch, 0 and 1 assert len(steps) == 3 assert ("batch" in steps and @@ -116,7 +116,7 @@ def test_cancel(submit_job): job = submit_job(script=create_job_script_multi_step()) time.sleep(util.WAIT_SECS_SLURMCTLD) - steps = JobSteps.load(job) + steps = JobSteps.load(job).as_dict() assert len(steps) == 3 assert ("batch" in steps and 0 in steps and @@ -125,7 +125,7 @@ def test_cancel(submit_job): steps[0].cancel() time.sleep(util.WAIT_SECS_SLURMCTLD) - steps = JobSteps.load(job) + steps = JobSteps.load(job).as_dict() assert len(steps) == 2 assert ("batch" in steps and 1 in steps) diff --git a/tests/integration/test_node.py b/tests/integration/test_node.py index fb6f5197..49a69db2 100644 --- a/tests/integration/test_node.py +++ b/tests/integration/test_node.py @@ -29,7 +29,7 @@ def test_load(): - name = 
Nodes.load().as_list()[0].name + name = Nodes.load()[0].name # Now load the node info node = Node.load(name) @@ -56,7 +56,7 @@ def test_create(): def test_modify(): - node = Node(Nodes.load().as_list()[0].name) + node = Node(Nodes.load()[0].name) node.modify(Node(weight=10000)) assert Node.load(node.name).weight == 10000 @@ -69,4 +69,4 @@ def test_modify(): def test_parse_all(): - Node.load(Nodes.load().as_list()[0].name).as_dict() + Node.load(Nodes.load()[0].name).as_dict() diff --git a/tests/integration/test_partition.py b/tests/integration/test_partition.py index fcfcf4af..8d7a4de4 100644 --- a/tests/integration/test_partition.py +++ b/tests/integration/test_partition.py @@ -28,7 +28,7 @@ def test_load(): - part = Partitions.load().as_list()[0] + part = Partitions.load()[0] assert part.name assert part.state @@ -49,7 +49,7 @@ def test_create_delete(): def test_modify(): - part = Partitions.load().as_list()[0] + part = Partitions.load()[0] part.modify(Partition(default_time=120)) assert Partition.load(part.name).default_time == 120 @@ -68,22 +68,23 @@ def test_modify(): def test_parse_all(): - Partitions.load().as_list()[0].as_dict() + Partitions.load()[0].as_dict() def test_reload(): _partnames = [util.randstr() for i in range(3)] _tmp_parts = Partitions(_partnames) - for part in _tmp_parts.values(): + for part in _tmp_parts: part.create() all_parts = Partitions.load() assert len(all_parts) >= 3 my_parts = Partitions(_partnames[1:]).reload() + print(my_parts) assert len(my_parts) == 2 - for part in my_parts.as_list(): + for part in my_parts: assert part.state != "UNKNOWN" - for part in _tmp_parts.values(): + for part in _tmp_parts: part.delete() diff --git a/tests/unit/test_common.py b/tests/unit/test_common.py index 48f4fecf..1598d191 100644 --- a/tests/unit/test_common.py +++ b/tests/unit/test_common.py @@ -54,6 +54,10 @@ cpubind_to_num, nodelist_from_range_str, nodelist_to_range_str, + instance_to_dict, + collection_to_dict, + collection_to_dict_global, + 
group_collection_by_cluster, _sum_prop, ) from pyslurm.utils import cstr @@ -426,4 +430,59 @@ def cpus(self): assert _sum_prop(object_dict, TestObject.memory) == expected expected = 0 - assert _sum_prop(object_dict, TestObject.cpus) == 0 + assert _sum_prop(object_dict, TestObject.cpus) == expected + + def test_collection_to_dict(self): + class TestObject: + + def __init__(self, _id, _grp_id, cluster): + self._id = _id + self._grp_id = _grp_id + self.cluster = cluster + + @property + def id(self): + return self._id + + @property + def group_id(self): + return self._grp_id + + def as_dict(self): + return instance_to_dict(self) + + class TestCollection(list): + + def __init__(self, data): + super().__init__() + self.extend(data) + + OFFSET = 100 + RANGE = 10 + + data = [TestObject(x, x+OFFSET, "TestCluster") for x in range(RANGE)] + collection = TestCollection(data) + + coldict = collection_to_dict(collection, identifier=TestObject.id) + coldict = coldict.get("TestCluster", {}) + + assert len(coldict) == RANGE + for i in range(RANGE): + assert i in coldict + assert isinstance(coldict[i], TestObject) + + coldict = collection_to_dict(collection, identifier=TestObject.id, + group_id=TestObject.group_id) + coldict = coldict.get("TestCluster", {}) + + assert len(coldict) == RANGE + for i in range(RANGE): + assert i+OFFSET in coldict + assert i in coldict[i+OFFSET] + + coldict = collection_to_dict(collection, identifier=TestObject.id, + recursive=True) + coldict = coldict.get("TestCluster", {}) + + for item in coldict.values(): + assert isinstance(item, dict) diff --git a/tests/unit/test_db_job.py b/tests/unit/test_db_job.py index 9391f04a..7b77671f 100644 --- a/tests/unit/test_db_job.py +++ b/tests/unit/test_db_job.py @@ -25,7 +25,7 @@ def test_filter(): - job_filter = pyslurm.db.JobSearchFilter() + job_filter = pyslurm.db.JobFilter() job_filter.clusters = ["test1"] job_filter.partitions = ["partition1", "partition2"] @@ -45,6 +45,7 @@ def test_filter(): def 
test_create_collection(): jobs = pyslurm.db.Jobs("101,102") assert len(jobs) == 2 + jobs = jobs.as_dict() assert 101 in jobs assert 102 in jobs assert jobs[101].id == 101 @@ -52,6 +53,7 @@ def test_create_collection(): jobs = pyslurm.db.Jobs([101, 102]) assert len(jobs) == 2 + jobs = jobs.as_dict() assert 101 in jobs assert 102 in jobs assert jobs[101].id == 101 @@ -64,6 +66,7 @@ def test_create_collection(): } ) assert len(jobs) == 2 + jobs = jobs.as_dict() assert 101 in jobs assert 102 in jobs assert jobs[101].id == 101 diff --git a/tests/unit/test_db_qos.py b/tests/unit/test_db_qos.py index acf12fea..0d2fd538 100644 --- a/tests/unit/test_db_qos.py +++ b/tests/unit/test_db_qos.py @@ -25,7 +25,7 @@ def test_search_filter(): - qos_filter = pyslurm.db.QualityOfServiceSearchFilter() + qos_filter = pyslurm.db.QualityOfServiceFilter() qos_filter._create() qos_filter.ids = [1, 2] diff --git a/tests/unit/test_job_steps.py b/tests/unit/test_job_steps.py index c222ef34..fcd0d012 100644 --- a/tests/unit/test_job_steps.py +++ b/tests/unit/test_job_steps.py @@ -22,7 +22,7 @@ import pytest from pyslurm import JobStep, Job -from pyslurm.core.job.step import ( +from pyslurm.utils.helpers import ( humanize_step_id, dehumanize_step_id, ) diff --git a/tests/unit/test_node.py b/tests/unit/test_node.py index f2b5594a..755e85d9 100644 --- a/tests/unit/test_node.py +++ b/tests/unit/test_node.py @@ -36,14 +36,14 @@ def test_parse_all(): def test_create_nodes_collection(): - nodes = Nodes("node1,node2") + nodes = Nodes("node1,node2").as_dict() assert len(nodes) == 2 assert "node1" in nodes assert "node2" in nodes assert nodes["node1"].name == "node1" assert nodes["node2"].name == "node2" - nodes = Nodes(["node1", "node2"]) + nodes = Nodes(["node1", "node2"]).as_dict() assert len(nodes) == 2 assert "node1" in nodes assert "node2" in nodes @@ -55,7 +55,7 @@ def test_create_nodes_collection(): "node1": Node("node1"), "node2": Node("node2"), } - ) + ).as_dict() assert len(nodes) == 2 assert 
"node1" in nodes assert "node2" in nodes diff --git a/tests/unit/test_partition.py b/tests/unit/test_partition.py index 141a6e51..89403ae2 100644 --- a/tests/unit/test_partition.py +++ b/tests/unit/test_partition.py @@ -32,14 +32,14 @@ def test_create_instance(): def test_create_collection(): - parts = Partitions("part1,part2") + parts = Partitions("part1,part2").as_dict() assert len(parts) == 2 assert "part1" in parts assert "part2" in parts assert parts["part1"].name == "part1" assert parts["part2"].name == "part2" - parts = Partitions(["part1", "part2"]) + parts = Partitions(["part1", "part2"]).as_dict() assert len(parts) == 2 assert "part1" in parts assert "part2" in parts @@ -51,7 +51,7 @@ def test_create_collection(): "part1": Partition("part1"), "part2": Partition("part2"), } - ) + ).as_dict() assert len(parts) == 2 assert "part1" in parts assert "part2" in parts From 0a6504757b6c5e6503704697610181a4509c6183 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Thu, 13 Jul 2023 22:52:44 +0200 Subject: [PATCH 29/48] Refactor + improve docs (#305) - kinda revert the change from #304 where all collections inherit from `list` - there is now a new base class `MultiClusterMap`, which acts as a dict but is capable of holding data from multiple clusters (for example when querying database jobs from multiple clusters in pyslurm.db.Jobs) - improve docs --- CHANGELOG.md | 7 +- docs/reference/config.md | 1 - docs/reference/constants.md | 2 - docs/reference/db/cluster.md | 1 - docs/reference/db/connection.md | 1 - docs/reference/db/event.md | 1 - docs/reference/db/job.md | 3 - docs/reference/db/jobfilter.md | 1 - docs/reference/db/jobstats.md | 1 - docs/reference/db/jobstep.md | 3 - docs/reference/db/reservation.md | 1 - docs/reference/exceptions.md | 3 - docs/reference/frontend.md | 1 - docs/reference/hostlist.md | 1 - docs/reference/job.md | 3 - docs/reference/jobstep.md | 3 - docs/reference/jobsubmitdescription.md | 1 - 
docs/reference/node.md | 3 - docs/reference/partition.md | 3 - docs/reference/reservation.md | 1 - docs/reference/statistics.md | 1 - docs/reference/topology.md | 1 - docs/reference/trigger.md | 1 - docs/reference/utilities.md | 24 +- docs/reference/xcollections.md | 16 + docs/stylesheets/extra.css | 6 + mkdocs.yml | 2 + pyslurm/core/job/job.pxd | 17 +- pyslurm/core/job/job.pyx | 138 ++---- pyslurm/core/job/step.pxd | 15 +- pyslurm/core/job/step.pyx | 100 ++-- pyslurm/core/job/submission.pyx | 1 - pyslurm/core/node.pxd | 13 +- pyslurm/core/node.pyx | 116 ++--- pyslurm/core/partition.pxd | 7 +- pyslurm/core/partition.pyx | 108 ++--- pyslurm/db/__init__.py | 1 - pyslurm/db/assoc.pxd | 7 +- pyslurm/db/assoc.pyx | 70 +-- pyslurm/db/cluster.pxd | 27 -- pyslurm/db/job.pxd | 9 +- pyslurm/db/job.pyx | 120 ++--- pyslurm/db/qos.pxd | 2 +- pyslurm/db/qos.pyx | 43 +- pyslurm/db/stats.pyx | 2 +- pyslurm/db/step.pxd | 2 +- pyslurm/db/step.pyx | 9 +- pyslurm/db/tres.pxd | 6 +- pyslurm/db/tres.pyx | 40 +- pyslurm/{db/cluster.pyx => settings.pyx} | 4 +- pyslurm/utils/helpers.pyx | 54 --- pyslurm/xcollections.pxd | 93 ++++ pyslurm/xcollections.pyx | 581 +++++++++++++++++++++++ tests/integration/test_db_job.py | 2 +- tests/integration/test_db_qos.py | 2 +- tests/integration/test_job.py | 6 +- tests/integration/test_job_steps.py | 11 +- tests/integration/test_node.py | 7 +- tests/integration/test_partition.py | 18 +- tests/unit/test_collection.py | 328 +++++++++++++ tests/unit/test_common.py | 79 +-- tests/unit/test_db_job.py | 37 +- tests/unit/test_db_qos.py | 5 - tests/unit/test_job.py | 4 +- tests/unit/test_job_steps.py | 4 +- tests/unit/test_node.py | 30 +- tests/unit/test_partition.py | 30 +- 67 files changed, 1368 insertions(+), 872 deletions(-) create mode 100644 docs/reference/xcollections.md delete mode 100644 pyslurm/db/cluster.pxd rename pyslurm/{db/cluster.pyx => settings.pyx} (92%) create mode 100644 pyslurm/xcollections.pxd create mode 100644 pyslurm/xcollections.pyx 
create mode 100644 tests/unit/test_collection.py diff --git a/CHANGELOG.md b/CHANGELOG.md index df972286..4f6dd4c6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,9 +16,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - New attributes for a Database Job: - extra - failed_node -- Now possible to initialize a pyslurm.db.Jobs collection with existing job +- Now possible to initialize a [pyslurm.db.Jobs][] collection with existing job ids or pyslurm.db.Job objects - Added `as_dict` function to all Collections +- Added a new Base Class [MultiClusterMap][pyslurm.xcollections.MultiClusterMap] that some Collections inherit from. ### Fixed @@ -28,9 +29,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - the Job was older than a day ### Changed - -- All Collections (like [pyslurm.Jobs](https://pyslurm.github.io/23.2/reference/job/#pyslurm.Jobs)) inherit from `list` now instead of `dict` + - `JobSearchFilter` has been renamed to `JobFilter` +- Renamed `as_dict` Function of some classes to `to_dict` ## [23.2.1](https://github.com/PySlurm/pyslurm/releases/tag/v23.2.1) - 2023-05-18 diff --git a/docs/reference/config.md b/docs/reference/config.md index 94b0438e..a461aba5 100644 --- a/docs/reference/config.md +++ b/docs/reference/config.md @@ -7,4 +7,3 @@ title: Config removed in the future when a replacement is introduced ::: pyslurm.config - handler: python diff --git a/docs/reference/constants.md b/docs/reference/constants.md index dd659b4c..65301afb 100644 --- a/docs/reference/constants.md +++ b/docs/reference/constants.md @@ -3,5 +3,3 @@ title: constants --- ::: pyslurm.constants - handler: python - diff --git a/docs/reference/db/cluster.md b/docs/reference/db/cluster.md index e6d0a900..219988d5 100644 --- a/docs/reference/db/cluster.md +++ b/docs/reference/db/cluster.md @@ -7,4 +7,3 @@ title: Cluster removed in the future when a replacement is introduced ::: pyslurm.slurmdb_clusters - handler: 
python diff --git a/docs/reference/db/connection.md b/docs/reference/db/connection.md index 27c904fc..7d77639e 100644 --- a/docs/reference/db/connection.md +++ b/docs/reference/db/connection.md @@ -3,4 +3,3 @@ title: Connection --- ::: pyslurm.db.Connection - handler: python diff --git a/docs/reference/db/event.md b/docs/reference/db/event.md index 020abcac..2816aaae 100644 --- a/docs/reference/db/event.md +++ b/docs/reference/db/event.md @@ -7,4 +7,3 @@ title: Event removed in the future when a replacement is introduced ::: pyslurm.slurmdb_events - handler: python diff --git a/docs/reference/db/job.md b/docs/reference/db/job.md index a2c7fadd..e806cc1f 100644 --- a/docs/reference/db/job.md +++ b/docs/reference/db/job.md @@ -7,7 +7,4 @@ title: Job will be removed in a future release ::: pyslurm.db.Job - handler: python - ::: pyslurm.db.Jobs - handler: python diff --git a/docs/reference/db/jobfilter.md b/docs/reference/db/jobfilter.md index 21aa55d1..523d7c9c 100644 --- a/docs/reference/db/jobfilter.md +++ b/docs/reference/db/jobfilter.md @@ -3,4 +3,3 @@ title: JobFilter --- ::: pyslurm.db.JobFilter - handler: python diff --git a/docs/reference/db/jobstats.md b/docs/reference/db/jobstats.md index 35f31ac6..1bc17d20 100644 --- a/docs/reference/db/jobstats.md +++ b/docs/reference/db/jobstats.md @@ -3,4 +3,3 @@ title: JobStatistics --- ::: pyslurm.db.JobStatistics - handler: python diff --git a/docs/reference/db/jobstep.md b/docs/reference/db/jobstep.md index 392fab65..a7bdc720 100644 --- a/docs/reference/db/jobstep.md +++ b/docs/reference/db/jobstep.md @@ -3,7 +3,4 @@ title: JobStep --- ::: pyslurm.db.JobStep - handler: python - ::: pyslurm.db.JobSteps - handler: python diff --git a/docs/reference/db/reservation.md b/docs/reference/db/reservation.md index 1a1af0c4..c1f110a3 100644 --- a/docs/reference/db/reservation.md +++ b/docs/reference/db/reservation.md @@ -7,4 +7,3 @@ title: Reservation removed in the future when a replacement is introduced ::: 
pyslurm.slurmdb_reservations - handler: python diff --git a/docs/reference/exceptions.md b/docs/reference/exceptions.md index 90876435..4abc0047 100644 --- a/docs/reference/exceptions.md +++ b/docs/reference/exceptions.md @@ -3,7 +3,4 @@ title: Exceptions --- ::: pyslurm.PyslurmError - handler: python - ::: pyslurm.RPCError - handler: python diff --git a/docs/reference/frontend.md b/docs/reference/frontend.md index 5247e540..f56a7ecd 100644 --- a/docs/reference/frontend.md +++ b/docs/reference/frontend.md @@ -7,4 +7,3 @@ title: Frontend removed in the future when a replacement is introduced ::: pyslurm.front_end - handler: python diff --git a/docs/reference/hostlist.md b/docs/reference/hostlist.md index dc2d81ee..33f8485d 100644 --- a/docs/reference/hostlist.md +++ b/docs/reference/hostlist.md @@ -7,4 +7,3 @@ title: Hostlist removed in the future when a replacement is introduced ::: pyslurm.hostlist - handler: python diff --git a/docs/reference/job.md b/docs/reference/job.md index 8e3d0c6e..cb1c19eb 100644 --- a/docs/reference/job.md +++ b/docs/reference/job.md @@ -7,7 +7,4 @@ title: Job removed in a future release ::: pyslurm.Job - handler: python - ::: pyslurm.Jobs - handler: python diff --git a/docs/reference/jobstep.md b/docs/reference/jobstep.md index 2ce6ef7f..b7b3e2b9 100644 --- a/docs/reference/jobstep.md +++ b/docs/reference/jobstep.md @@ -7,7 +7,4 @@ title: JobStep will be removed in a future release ::: pyslurm.JobStep - handler: python - ::: pyslurm.JobSteps - handler: python diff --git a/docs/reference/jobsubmitdescription.md b/docs/reference/jobsubmitdescription.md index bd31bac9..bf7eb6bd 100644 --- a/docs/reference/jobsubmitdescription.md +++ b/docs/reference/jobsubmitdescription.md @@ -3,4 +3,3 @@ title: JobSubmitDescription --- ::: pyslurm.JobSubmitDescription - handler: python diff --git a/docs/reference/node.md b/docs/reference/node.md index ccb16c54..e8e8d619 100644 --- a/docs/reference/node.md +++ b/docs/reference/node.md @@ -7,7 +7,4 @@ 
title: Node removed in a future release ::: pyslurm.Node - handler: python - ::: pyslurm.Nodes - handler: python diff --git a/docs/reference/partition.md b/docs/reference/partition.md index b9701f55..9181e10f 100644 --- a/docs/reference/partition.md +++ b/docs/reference/partition.md @@ -7,7 +7,4 @@ title: Partition will be removed in a future release ::: pyslurm.Partition - handler: python - ::: pyslurm.Partitions - handler: python diff --git a/docs/reference/reservation.md b/docs/reference/reservation.md index 563e29db..c5a3d891 100644 --- a/docs/reference/reservation.md +++ b/docs/reference/reservation.md @@ -7,4 +7,3 @@ title: Reservation removed in the future when a replacement is introduced ::: pyslurm.reservation - handler: python diff --git a/docs/reference/statistics.md b/docs/reference/statistics.md index 1f2b2e37..043461f8 100644 --- a/docs/reference/statistics.md +++ b/docs/reference/statistics.md @@ -7,4 +7,3 @@ title: Statistics removed in the future when a replacement is introduced ::: pyslurm.statistics - handler: python diff --git a/docs/reference/topology.md b/docs/reference/topology.md index 1cb107a1..c6b8f9cc 100644 --- a/docs/reference/topology.md +++ b/docs/reference/topology.md @@ -7,4 +7,3 @@ title: Topology removed in the future when a replacement is introduced ::: pyslurm.topology - handler: python diff --git a/docs/reference/trigger.md b/docs/reference/trigger.md index 308a3e3f..e6ea1e98 100644 --- a/docs/reference/trigger.md +++ b/docs/reference/trigger.md @@ -7,4 +7,3 @@ title: Trigger removed in the future when a replacement is introduced ::: pyslurm.trigger - handler: python diff --git a/docs/reference/utilities.md b/docs/reference/utilities.md index 63eb7bc0..dbf4a09e 100644 --- a/docs/reference/utilities.md +++ b/docs/reference/utilities.md @@ -3,37 +3,17 @@ title: utils --- ::: pyslurm.utils - handler: python + options: + members: [] ::: pyslurm.utils.timestr_to_secs - handler: python - ::: pyslurm.utils.timestr_to_mins - handler: 
python - ::: pyslurm.utils.secs_to_timestr - handler: python - ::: pyslurm.utils.mins_to_timestr - handler: python - ::: pyslurm.utils.date_to_timestamp - handler: python - ::: pyslurm.utils.timestamp_to_date - handler: python - ::: pyslurm.utils.expand_range_str - handler: python - ::: pyslurm.utils.humanize - handler: python - ::: pyslurm.utils.dehumanize - handler: python - ::: pyslurm.utils.nodelist_from_range_str - handler: python - ::: pyslurm.utils.nodelist_to_range_str - handler: python diff --git a/docs/reference/xcollections.md b/docs/reference/xcollections.md new file mode 100644 index 00000000..fd57ec09 --- /dev/null +++ b/docs/reference/xcollections.md @@ -0,0 +1,16 @@ +--- +title: xcollections +--- + +::: pyslurm.xcollections + handler: python + options: + members: + - MultiClusterMap + - BaseView + - KeysView + - MCKeysView + - ItemsView + - MCItemsView + - ValuesView + - ClustersView diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css index 9562d9be..eab891415 100644 --- a/docs/stylesheets/extra.css +++ b/docs/stylesheets/extra.css @@ -2,3 +2,9 @@ .md-grid { max-width: 75%; } + +/* Indentation. 
*/ +div.doc-contents:not(.first) { + padding-left: 25px; + border-left: .05rem solid var(--md-typeset-table-color); +} diff --git a/mkdocs.yml b/mkdocs.yml index daea3007..9d81f66b 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -56,6 +56,8 @@ plugins: docstring_style: google show_signature: true show_root_heading: true + show_symbol_type_toc: true + show_symbol_type_heading: true markdown_extensions: - admonition diff --git a/pyslurm/core/job/job.pxd b/pyslurm/core/job/job.pxd index bee4f9ec..4eb89bde 100644 --- a/pyslurm/core/job/job.pxd +++ b/pyslurm/core/job/job.pxd @@ -25,14 +25,12 @@ from pyslurm.utils cimport cstr, ctime from pyslurm.utils.uint cimport * from pyslurm.utils.ctime cimport time_t - from libc.string cimport memcpy, memset from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t, int64_t from libc.stdlib cimport free - from pyslurm.core.job.submission cimport JobSubmitDescription from pyslurm.core.job.step cimport JobSteps, JobStep - +from pyslurm.xcollections cimport MultiClusterMap from pyslurm cimport slurm from pyslurm.slurm cimport ( working_cluster_rec, @@ -67,8 +65,8 @@ from pyslurm.slurm cimport ( ) -cdef class Jobs(list): - """A collection of [pyslurm.Job][] objects. +cdef class Jobs(MultiClusterMap): + """A [`Multi Cluster`][pyslurm.xcollections.MultiClusterMap] collection of [pyslurm.Job][] objects. Args: jobs (Union[list, dict], optional=None): @@ -90,7 +88,7 @@ cdef class Jobs(list): This is the result of multiplying the run_time with the amount of cpus for each job. frozen (bool): - If this is set to True and the reload() method is called, then + If this is set to True and the `reload()` method is called, then *ONLY* Jobs that already exist in this collection will be reloaded. New Jobs that are discovered will not be added to this collection, but old Jobs which have already been purged from the @@ -115,15 +113,12 @@ cdef class Job: job_id (int): An Integer representing a Job-ID. 
- Raises: - MemoryError: If malloc fails to allocate memory. - Attributes: steps (JobSteps): Steps this Job has. Before you can access the Steps data for a Job, you have to call - the reload() method of a Job instance or the load_steps() method - of a Jobs collection. + the `reload()` method of a Job instance or the `load_steps()` + method of a Jobs collection. name (str): Name of the Job id (int): diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index 2c33d581..e2915608 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -34,7 +34,8 @@ from typing import Union from pyslurm.utils import cstr, ctime from pyslurm.utils.uint import * from pyslurm.core.job.util import * -from pyslurm.db.cluster import LOCAL_CLUSTER +from pyslurm.settings import LOCAL_CLUSTER +from pyslurm import xcollections from pyslurm.core.error import ( RPCError, verify_rpc, @@ -48,14 +49,11 @@ from pyslurm.utils.helpers import ( _getgrall_to_dict, _getpwall_to_dict, instance_to_dict, - collection_to_dict, - group_collection_by_cluster, - _sum_prop, _get_exit_code, ) -cdef class Jobs(list): +cdef class Jobs(MultiClusterMap): def __cinit__(self): self.info = NULL @@ -65,38 +63,11 @@ cdef class Jobs(list): def __init__(self, jobs=None, frozen=False): self.frozen = frozen - - if isinstance(jobs, list): - for job in jobs: - if isinstance(job, int): - self.append(Job(job)) - else: - self.append(job) - elif isinstance(jobs, str): - joblist = jobs.split(",") - self.extend([Job(int(job)) for job in joblist]) - elif isinstance(jobs, dict): - self.extend([job for job in jobs.values()]) - elif jobs is not None: - raise TypeError("Invalid Type: {type(jobs)}") - - def as_dict(self, recursive=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. - - Returns: - (dict): Collection as a dict. 
- """ - col = collection_to_dict(self, identifier=Job.id, recursive=recursive) - return col.get(LOCAL_CLUSTER, {}) - - def group_by_cluster(self): - return group_collection_by_cluster(self) + super().__init__(data=jobs, + typ="Jobs", + val_type=Job, + id_attr=Job.id, + key_type=int) @staticmethod def load(preload_passwd_info=False, frozen=False): @@ -122,7 +93,7 @@ cdef class Jobs(list): cdef: dict passwd = {} dict groups = {} - Jobs jobs = Jobs.__new__(Jobs) + Jobs jobs = Jobs(frozen=frozen) int flags = slurm.SHOW_ALL | slurm.SHOW_DETAIL Job job @@ -150,16 +121,13 @@ cdef class Jobs(list): job.passwd = passwd job.groups = groups - jobs.append(job) + cluster = job.cluster + if cluster not in jobs.data: + jobs.data[cluster] = {} + jobs[cluster][job.id] = job - # At this point we memcpy'd all the memory for the Jobs. Setting this - # to 0 will prevent the slurm job free function to deallocate the - # memory for the individual jobs. This should be fine, because they - # are free'd automatically in __dealloc__ since the lifetime of each - # job-pointer is tied to the lifetime of its corresponding "Job" - # instance. + # We have extracted all pointers jobs.info.record_count = 0 - jobs.frozen = frozen return jobs @@ -169,29 +137,7 @@ cdef class Jobs(list): Raises: RPCError: When getting the Jobs from the slurmctld failed. """ - cdef: - Jobs reloaded_jobs - Jobs new_jobs = Jobs() - dict self_dict - - if not self: - return self - - reloaded_jobs = Jobs.load().as_dict() - for idx, jid in enumerate(self): - if jid in reloaded_jobs: - # Put the new data in. - new_jobs.append(reloaded_jobs[jid]) - - if not self.frozen: - self_dict = self.as_dict() - for jid in reloaded_jobs: - if jid not in self_dict: - new_jobs.append(reloaded_jobs[jid]) - - self.clear() - self.extend(new_jobs) - return self + return xcollections.multi_reload(self, frozen=self.frozen) def load_steps(self): """Load all Job steps for this collection of Jobs. 
@@ -207,32 +153,27 @@ cdef class Jobs(list): RPCError: When retrieving the Job information for all the Steps failed. """ - cdef dict steps = JobSteps.load().as_dict() - - for idx, job in enumerate(self): - # Ignore any Steps from Jobs which do not exist in this - # collection. + cdef dict steps = JobSteps.load_all() + for job in self.values(): jid = job.id if jid in steps: - job_steps = self[idx].steps - job_steps.clear() - job_steps.extend(steps[jid].values()) + job.steps = steps[jid] @property def memory(self): - return _sum_prop(self, Job.memory) + return xcollections.sum_property(self, Job.memory) @property def cpus(self): - return _sum_prop(self, Job.cpus) + return xcollections.sum_property(self, Job.cpus) @property def ntasks(self): - return _sum_prop(self, Job.ntasks) + return xcollections.sum_property(self, Job.ntasks) @property def cpu_time(self): - return _sum_prop(self, Job.cpu_time) + return xcollections.sum_property(self, Job.cpu_time) cdef class Job: @@ -246,7 +187,7 @@ cdef class Job: self.passwd = {} self.groups = {} cstr.fmalloc(&self.ptr.cluster, LOCAL_CLUSTER) - self.steps = JobSteps.__new__(JobSteps) + self.steps = JobSteps() def _alloc_impl(self): if not self.ptr: @@ -261,10 +202,8 @@ cdef class Job: def __dealloc__(self): self._dealloc_impl() - def __eq__(self, other): - if isinstance(other, Job): - return self.id == other.id and self.cluster == other.cluster - return NotImplemented + def __repr__(self): + return f'{self.__class__.__name__}({self.id})' @staticmethod def load(job_id): @@ -329,7 +268,6 @@ cdef class Job: wrap.groups = {} wrap.steps = JobSteps.__new__(JobSteps) memcpy(wrap.ptr, in_ptr, sizeof(slurm_job_info_t)) - return wrap cdef _swap_data(Job dst, Job src): @@ -340,6 +278,9 @@ cdef class Job: src.ptr = tmp def as_dict(self): + return self.to_dict() + + def to_dict(self): """Job information formatted as a dictionary. 
Returns: @@ -355,14 +296,14 @@ cdef class Job: Args: signal (Union[str, int]): Any valid signal which will be sent to the Job. Can be either - a str like 'SIGUSR1', or simply an int. + a str like `SIGUSR1`, or simply an [int][]. steps (str): Selects which steps should be signaled. Valid values for this - are: "all", "batch" and "children". The default value is - "children", where all steps except the batch-step will be + are: `all`, `batch` and `children`. The default value is + `children`, where all steps except the batch-step will be signaled. - The value "batch" in contrast means, that only the batch-step - will be signaled. With "all" every step is signaled. + The value `batch` in contrast means, that only the batch-step + will be signaled. With `all` every step is signaled. hurry (bool): If True, no burst buffer data will be staged out. The default value is False. @@ -480,9 +421,9 @@ cdef class Job: Args: mode (str): Determines in which mode the Job should be held. Possible - values are "user" or "admin". By default, the Job is held in - "admin" mode, meaning only an Administrator will be able to - release the Job again. If you specify the mode as "user", the + values are `user` or `admin`. By default, the Job is held in + `admin` mode, meaning only an Administrator will be able to + release the Job again. If you specify the mode as `user`, the User will also be able to release the job. Raises: @@ -524,7 +465,7 @@ cdef class Job: Args: hold (bool, optional): Controls whether the Job should be put in a held state or not. - Default for this is 'False', so it will not be held. + Default for this is `False`, so it will not be held. Raises: RPCError: When requeing the Job was not successful. @@ -1242,8 +1183,9 @@ cdef class Job: Return type may still be subject to change in the future Returns: - (dict): Resource layout, where the key is the name of the name and - its value another dict with the CPU-ids, memory and gres. 
+ (dict): Resource layout, where the key is the name of the node and + the value another dict with the keys `cpu_ids`, `memory` and + `gres`. """ # The code for this function is a modified reimplementation from here: # https://github.com/SchedMD/slurm/blob/d525b6872a106d32916b33a8738f12510ec7cf04/src/api/job_info.c#L739 diff --git a/pyslurm/core/job/step.pxd b/pyslurm/core/job/step.pxd index 458ee506..489e9d64 100644 --- a/pyslurm/core/job/step.pxd +++ b/pyslurm/core/job/step.pxd @@ -49,16 +49,11 @@ from pyslurm.utils.ctime cimport time_t from pyslurm.core.job.task_dist cimport TaskDistribution -cdef class JobSteps(list): - """A collection of [pyslurm.JobStep][] objects for a given Job. - - Args: - job (Union[Job, int]): - A Job for which the Steps should be loaded. +cdef class JobSteps(dict): + """A [dict][] of [pyslurm.JobStep][] objects for a given Job. Raises: RPCError: When getting the Job steps from the slurmctld failed. - MemoryError: If malloc fails to allocate memory. """ cdef: @@ -68,8 +63,7 @@ cdef class JobSteps(list): @staticmethod cdef JobSteps _load_single(Job job) - - cdef _load_data(self, uint32_t job_id, int flags) + cdef dict _load_data(self, uint32_t job_id, int flags) cdef class JobStep: @@ -85,9 +79,6 @@ cdef class JobStep: time_limit (int): Time limit in Minutes for this step. - Raises: - MemoryError: If malloc fails to allocate memory. - Attributes: id (Union[str, int]): The id for this step. 
diff --git a/pyslurm/core/job/step.pyx b/pyslurm/core/job/step.pyx index d4038f54..54cb8f59 100644 --- a/pyslurm/core/job/step.pyx +++ b/pyslurm/core/job/step.pyx @@ -26,13 +26,12 @@ from typing import Union from pyslurm.utils import cstr, ctime from pyslurm.utils.uint import * from pyslurm.core.error import RPCError, verify_rpc -from pyslurm.db.cluster import LOCAL_CLUSTER +from pyslurm.settings import LOCAL_CLUSTER +from pyslurm import xcollections from pyslurm.utils.helpers import ( signal_to_num, instance_to_dict, uid_to_name, - collection_to_dict, - group_collection_by_cluster, humanize_step_id, dehumanize_step_id, ) @@ -46,7 +45,7 @@ from pyslurm.utils.ctime import ( ) -cdef class JobSteps(list): +cdef class JobSteps(dict): def __dealloc__(self): slurm_free_job_step_info_response_msg(self.info) @@ -55,73 +54,48 @@ cdef class JobSteps(list): self.info = NULL def __init__(self, steps=None): - if isinstance(steps, list): - self.extend(steps) + if isinstance(steps, dict): + self.update(steps) elif steps is not None: raise TypeError("Invalid Type: {type(steps)}") - def as_dict(self, recursive=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. - - Returns: - (dict): Collection as a dict. - """ - col = collection_to_dict(self, identifier=JobStep.id, - recursive=recursive, group_id=JobStep.job_id) - col = col.get(LOCAL_CLUSTER, {}) - if self._job_id: - return col.get(self._job_id, {}) - - return col - - def group_by_cluster(self): - return group_collection_by_cluster(self) - @staticmethod - def load(job_id=0): + def load(job): """Load the Job Steps from the system. Args: - job_id (Union[Job, int]): + job (Union[Job, int]): The Job for which the Steps should be loaded. 
Returns: (pyslurm.JobSteps): JobSteps of the Job """ cdef: - Job job + Job _job JobSteps steps - if job_id: - job = Job.load(job_id.id if isinstance(job_id, Job) else job_id) - steps = JobSteps._load_single(job) - steps._job_id = job.id - return steps - else: - steps = JobSteps() - return steps._load_data(0, slurm.SHOW_ALL) + _job = Job.load(job.id if isinstance(job, Job) else job) + steps = JobSteps._load_single(_job) + steps._job_id = _job.id + return steps @staticmethod cdef JobSteps _load_single(Job job): cdef JobSteps steps = JobSteps() - steps._load_data(job.id, slurm.SHOW_ALL) - if not steps and not slurm.IS_JOB_PENDING(job.ptr): + data = steps._load_data(job.id, slurm.SHOW_ALL) + if not data and not slurm.IS_JOB_PENDING(job.ptr): msg = f"Failed to load step info for Job {job.id}." raise RPCError(msg=msg) + steps.update(data[job.id]) return steps - - cdef _load_data(self, uint32_t job_id, int flags): + + cdef dict _load_data(self, uint32_t job_id, int flags): cdef: JobStep step uint32_t cnt = 0 + dict steps = {} rc = slurm_get_job_steps(0, job_id, slurm.NO_VAL, &self.info, flags) @@ -133,21 +107,29 @@ cdef class JobSteps(list): # Put each job-step pointer into its own "JobStep" instance. for cnt in range(self.info.job_step_count): step = JobStep.from_ptr(&self.info.job_steps[cnt]) - # Prevent double free if xmalloc fails mid-loop and a MemoryError # is raised by replacing it with a zeroed-out job_step_info_t. self.info.job_steps[cnt] = self.tmp_info - self.append(step) - - # At this point we memcpy'd all the memory for the Steps. Setting this - # to 0 will prevent the slurm step free function to deallocate the - # memory for the individual steps. This should be fine, because they - # are free'd automatically in __dealloc__ since the lifetime of each - # step-pointer is tied to the lifetime of its corresponding JobStep - # instance. 
+ + job_id = step.job_id + if not job_id in steps: + steps[job_id] = JobSteps() + steps[job_id][step.id] = step + + # We have extracted all pointers self.info.job_step_count = 0 + return steps - return self + @staticmethod + def load_all(): + """Loads all the steps in the system. + + Returns: + (dict): A dict where every JobID (key) is mapped with an instance + of its JobSteps (value). + """ + cdef JobSteps steps = JobSteps() + return steps._load_data(slurm.NO_VAL, slurm.SHOW_ALL) cdef class JobStep: @@ -160,6 +142,7 @@ cdef class JobStep: self._alloc_impl() self.job_id = job_id.id if isinstance(job_id, Job) else job_id self.id = step_id + cstr.fmalloc(&self.ptr.cluster, LOCAL_CLUSTER) # Initialize attributes, if any were provided for k, v in kwargs.items(): @@ -203,6 +186,9 @@ cdef class JobStep: # Call descriptors __set__ directly JobStep.__dict__[name].__set__(self, val) + def __repr__(self): + return f'{self.__class__.__name__}({self.id})' + @staticmethod def load(job_id, step_id): """Load information for a specific job step. @@ -221,7 +207,6 @@ cdef class JobStep: Raises: RPCError: When retrieving Step information from the slurmctld was not successful. - MemoryError: If malloc failed to allocate memory. Examples: >>> import pyslurm @@ -264,7 +249,7 @@ cdef class JobStep: Args: signal (Union[str, int]): Any valid signal which will be sent to the Job. Can be either - a str like 'SIGUSR1', or simply an int. + a str like `SIGUSR1`, or simply an [int][]. Raises: RPCError: When sending the signal was not successful. @@ -326,6 +311,9 @@ cdef class JobStep: verify_rpc(slurm_update_step(js.umsg)) def as_dict(self): + return self.to_dict() + + def to_dict(self): """JobStep information formatted as a dictionary. 
Returns: diff --git a/pyslurm/core/job/submission.pyx b/pyslurm/core/job/submission.pyx index bf47105b..df33992b 100644 --- a/pyslurm/core/job/submission.pyx +++ b/pyslurm/core/job/submission.pyx @@ -81,7 +81,6 @@ cdef class JobSubmitDescription: Raises: RPCError: When the job submission was not successful. - MemoryError: If malloc failed to allocate enough memory. Examples: >>> import pyslurm diff --git a/pyslurm/core/node.pxd b/pyslurm/core/node.pxd index ea59e6ff..5167de78 100644 --- a/pyslurm/core/node.pxd +++ b/pyslurm/core/node.pxd @@ -55,10 +55,11 @@ from pyslurm.utils cimport cstr from pyslurm.utils cimport ctime from pyslurm.utils.ctime cimport time_t from pyslurm.utils.uint cimport * +from pyslurm.xcollections cimport MultiClusterMap -cdef class Nodes(list): - """A collection of [pyslurm.Node][] objects. +cdef class Nodes(MultiClusterMap): + """A [`Multi Cluster`][pyslurm.xcollections.MultiClusterMap] collection of [pyslurm.Node][] objects. Args: nodes (Union[list, dict, str], optional=None): @@ -83,9 +84,6 @@ cdef class Nodes(list): Total amount of Watts consumed in this node collection. avg_watts (int): Amount of average watts consumed in this node collection. - - Raises: - MemoryError: If malloc fails to allocate memory. """ cdef: node_info_msg_t *info @@ -165,7 +163,7 @@ cdef class Node: memory_reserved_for_system (int): Raw Memory in Mebibytes reserved for the System not usable by Jobs. - temporary_disk_space_per_node (int): + temporary_disk (int): Amount of temporary disk space this node has, in Mebibytes. weight (int): Weight of the node in scheduling. @@ -223,9 +221,6 @@ cdef class Node: CPU Load on the Node. slurmd_port (int): Port the slurmd is listening on the node. - - Raises: - MemoryError: If malloc fails to allocate memory. 
""" cdef: node_info_t *info diff --git a/pyslurm/core/node.pyx b/pyslurm/core/node.pyx index 609016fe..eac1bfef 100644 --- a/pyslurm/core/node.pyx +++ b/pyslurm/core/node.pyx @@ -28,7 +28,8 @@ from pyslurm.utils import ctime from pyslurm.utils.uint import * from pyslurm.core.error import RPCError, verify_rpc from pyslurm.utils.ctime import timestamp_to_date, _raw_time -from pyslurm.db.cluster import LOCAL_CLUSTER +from pyslurm.settings import LOCAL_CLUSTER +from pyslurm import xcollections from pyslurm.utils.helpers import ( uid_to_name, gid_to_name, @@ -37,15 +38,12 @@ from pyslurm.utils.helpers import ( _getpwall_to_dict, cpubind_to_num, instance_to_dict, - collection_to_dict, - group_collection_by_cluster, - _sum_prop, nodelist_from_range_str, nodelist_to_range_str, ) -cdef class Nodes(list): +cdef class Nodes(MultiClusterMap): def __dealloc__(self): slurm_free_node_info_msg(self.info) @@ -56,38 +54,11 @@ cdef class Nodes(list): self.part_info = NULL def __init__(self, nodes=None): - if isinstance(nodes, list): - for node in nodes: - if isinstance(node, str): - self.append(Node(node)) - else: - self.append(node) - elif isinstance(nodes, str): - nodelist = nodes.split(",") - self.extend([Node(node) for node in nodelist]) - elif isinstance(nodes, dict): - self.extend([node for node in nodes.values()]) - elif nodes is not None: - raise TypeError("Invalid Type: {type(nodes)}") - - def as_dict(self, recursive=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. - - Returns: - (dict): Collection as a dict. 
- """ - col = collection_to_dict(self, identifier=Node.name, - recursive=recursive) - return col.get(LOCAL_CLUSTER, {}) - - def group_by_cluster(self): - return group_collection_by_cluster(self) + super().__init__(data=nodes, + typ="Nodes", + val_type=Node, + id_attr=Node.name, + key_type=str) @staticmethod def load(preload_passwd_info=False): @@ -107,12 +78,11 @@ cdef class Nodes(list): Raises: RPCError: When getting all the Nodes from the slurmctld failed. - MemoryError: If malloc fails to allocate memory. """ cdef: dict passwd = {} dict groups = {} - Nodes nodes = Nodes.__new__(Nodes) + Nodes nodes = Nodes() int flags = slurm.SHOW_ALL Node node @@ -141,16 +111,13 @@ cdef class Nodes(list): node.passwd = passwd node.groups = groups - nodes.append(node) + cluster = node.cluster + if cluster not in nodes.data: + nodes.data[cluster] = {} + nodes.data[cluster][node.name] = node - # At this point we memcpy'd all the memory for the Nodes. Setting this - # to 0 will prevent the slurm node free function to deallocate the - # memory for the individual nodes. This should be fine, because they - # are free'd automatically in __dealloc__ since the lifetime of each - # node-pointer is tied to the lifetime of its corresponding "Node" - # instance. + # We have extracted all pointers nodes.info.record_count = 0 - return nodes def reload(self): @@ -164,19 +131,7 @@ cdef class Nodes(list): Raises: RPCError: When getting the Nodes from the slurmctld failed. """ - cdef Nodes reloaded_nodes - - if not self: - return self - - reloaded_nodes = Nodes.load().as_dict() - for idx, node in enumerate(self): - node_name = node.name - if node in reloaded_nodes: - # Put the new data in. - self[idx] = reloaded_nodes[node_name] - - return self + return xcollections.multi_reload(self) def modify(self, Node changes): """Modify all Nodes in a collection. 
@@ -199,50 +154,47 @@ cdef class Nodes(list): >>> # Apply the changes to all the nodes >>> nodes.modify(changes) """ - cdef: - Node n = changes - list node_names = [node.name for node in self] - - node_str = nodelist_to_range_str(node_names) + cdef Node n = changes + node_str = nodelist_to_range_str(list(self.keys())) n._alloc_umsg() cstr.fmalloc(&n.umsg.node_names, node_str) verify_rpc(slurm_update_node(n.umsg)) @property def free_memory(self): - return _sum_prop(self, Node.free_memory) + return xcollections.sum_property(self, Node.free_memory) @property def real_memory(self): - return _sum_prop(self, Node.real_memory) + return xcollections.sum_property(self, Node.real_memory) @property def allocated_memory(self): - return _sum_prop(self, Node.allocated_memory) + return xcollections.sum_property(self, Node.allocated_memory) @property def total_cpus(self): - return _sum_prop(self, Node.total_cpus) + return xcollections.sum_property(self, Node.total_cpus) @property def idle_cpus(self): - return _sum_prop(self, Node.idle_cpus) + return xcollections.sum_property(self, Node.idle_cpus) @property def allocated_cpus(self): - return _sum_prop(self, Node.allocated_cpus) + return xcollections.sum_property(self, Node.allocated_cpus) @property def effective_cpus(self): - return _sum_prop(self, Node.effective_cpus) + return xcollections.sum_property(self, Node.effective_cpus) @property def current_watts(self): - return _sum_prop(self, Node.current_watts) + return xcollections.sum_property(self, Node.current_watts) @property def avg_watts(self): - return _sum_prop(self, Node.avg_watts) + return xcollections.sum_property(self, Node.avg_watts) cdef class Node: @@ -293,8 +245,8 @@ cdef class Node: # Call descriptors __set__ directly Node.__dict__[name].__set__(self, val) - def __eq__(self, other): - return isinstance(other, Node) and self.name == other.name + def __repr__(self): + return f'{self.__class__.__name__}({self.name})' @staticmethod cdef Node from_ptr(node_info_t 
*in_ptr): @@ -325,7 +277,6 @@ cdef class Node: Raises: RPCError: If requesting the Node information from the slurmctld was not successful. - MemoryError: If malloc failed to allocate memory. Examples: >>> import pyslurm @@ -365,7 +316,7 @@ cdef class Node: Args: state (str, optional): An optional state the created Node should have. Allowed values - are "future" and "cloud". "future" is the default. + are `future` and `cloud`. `future` is the default. Returns: (pyslurm.Node): This function returns the current Node-instance @@ -373,7 +324,6 @@ cdef class Node: Raises: RPCError: If creating the Node was not successful. - MemoryError: If malloc failed to allocate memory. Examples: >>> import pyslurm @@ -424,7 +374,6 @@ cdef class Node: Raises: RPCError: If deleting the Node was not successful. - MemoryError: If malloc failed to allocate memory. Examples: >>> import pyslurm @@ -434,6 +383,9 @@ cdef class Node: verify_rpc(slurm_delete_node(self.umsg)) def as_dict(self): + return self.to_dict() + + def to_dict(self): """Node information formatted as a dictionary. Returns: @@ -442,7 +394,7 @@ cdef class Node: Examples: >>> import pyslurm >>> mynode = pyslurm.Node.load("mynode") - >>> mynode_dict = mynode.as_dict() + >>> mynode_dict = mynode.to_dict() """ return instance_to_dict(self) @@ -559,7 +511,7 @@ cdef class Node: return u64_parse(self.info.mem_spec_limit) @property - def temporary_disk_space(self): + def temporary_disk(self): return u32_parse(self.info.tmp_disk) @property diff --git a/pyslurm/core/partition.pxd b/pyslurm/core/partition.pxd index b10366b8..a5a638df 100644 --- a/pyslurm/core/partition.pxd +++ b/pyslurm/core/partition.pxd @@ -56,10 +56,11 @@ from pyslurm.utils cimport ctime from pyslurm.utils.ctime cimport time_t from pyslurm.utils.uint cimport * from pyslurm.core cimport slurmctld +from pyslurm.xcollections cimport MultiClusterMap -cdef class Partitions(list): - """A collection of [pyslurm.Partition][] objects. 
+cdef class Partitions(MultiClusterMap): + """A [`Multi Cluster`][pyslurm.xcollections.MultiClusterMap] collection of [pyslurm.Partition][] objects. Args: partitions (Union[list[str], dict[str, Partition], str], optional=None): @@ -167,7 +168,7 @@ cdef class Partition: This can also return [UNLIMITED][pyslurm.constants.UNLIMITED] min_nodes (int): Minimum number of Nodes that must be requested by Jobs - max_time_limit (int): + max_time (int): Max Time-Limit in minutes that Jobs can request This can also return [UNLIMITED][pyslurm.constants.UNLIMITED] diff --git a/pyslurm/core/partition.pyx b/pyslurm/core/partition.pyx index 56375d33..e1a1b6b1 100644 --- a/pyslurm/core/partition.pyx +++ b/pyslurm/core/partition.pyx @@ -30,7 +30,8 @@ from pyslurm.utils.uint import * from pyslurm.core.error import RPCError, verify_rpc from pyslurm.utils.ctime import timestamp_to_date, _raw_time from pyslurm.constants import UNLIMITED -from pyslurm.db.cluster import LOCAL_CLUSTER +from pyslurm.settings import LOCAL_CLUSTER +from pyslurm import xcollections from pyslurm.utils.helpers import ( uid_to_name, gid_to_name, @@ -38,9 +39,6 @@ from pyslurm.utils.helpers import ( _getpwall_to_dict, cpubind_to_num, instance_to_dict, - collection_to_dict, - group_collection_by_cluster, - _sum_prop, dehumanize, ) from pyslurm.utils.ctime import ( @@ -49,7 +47,7 @@ from pyslurm.utils.ctime import ( ) -cdef class Partitions(list): +cdef class Partitions(MultiClusterMap): def __dealloc__(self): slurm_free_partition_info_msg(self.info) @@ -58,38 +56,11 @@ cdef class Partitions(list): self.info = NULL def __init__(self, partitions=None): - if isinstance(partitions, list): - for part in partitions: - if isinstance(part, str): - self.append(Partition(part)) - else: - self.append(part) - elif isinstance(partitions, str): - partlist = partitions.split(",") - self.extend([Partition(part) for part in partlist]) - elif isinstance(partitions, dict): - self.extend([part for part in partitions.values()]) - elif 
partitions is not None: - raise TypeError("Invalid Type: {type(partitions)}") - - def as_dict(self, recursive=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. - - Returns: - (dict): Collection as a dict. - """ - col = collection_to_dict(self, identifier=Partition.name, - recursive=recursive) - return col.get(LOCAL_CLUSTER, {}) - - def group_by_cluster(self): - return group_collection_by_cluster(self) + super().__init__(data=partitions, + typ="Partitions", + val_type=Partition, + id_attr=Partition.name, + key_type=str) @staticmethod def load(): @@ -103,7 +74,7 @@ cdef class Partitions(list): failed. """ cdef: - Partitions partitions = Partitions.__new__(Partitions) + Partitions partitions = Partitions() int flags = slurm.SHOW_ALL Partition partition slurmctld.Config slurm_conf @@ -126,18 +97,16 @@ cdef class Partitions(list): # is raised by replacing it with a zeroed-out partition_info_t. partitions.info.partition_array[cnt] = partitions.tmp_info + cluster = partition.cluster + if cluster not in partitions.data: + partitions.data[cluster] = {} + partition.power_save_enabled = power_save_enabled partition.slurm_conf = slurm_conf - partitions.append(partition) - - # At this point we memcpy'd all the memory for the Partitions. Setting - # this to 0 will prevent the slurm partition free function to - # deallocate the memory for the individual partitions. This should be - # fine, because they are free'd automatically in __dealloc__ since the - # lifetime of each partition-pointer is tied to the lifetime of its - # corresponding "Partition" instance. 
- partitions.info.record_count = 0 + partitions.data[cluster][partition.name] = partition + # We have extracted all pointers + partitions.info.record_count = 0 return partitions def reload(self): @@ -154,19 +123,7 @@ cdef class Partitions(list): Raises: RPCError: When getting the Partitions from the slurmctld failed. """ - cdef dict reloaded_parts - - if not self: - return self - - reloaded_parts = Partitions.load().as_dict() - for idx, part in enumerate(self): - part_name = part.name - if part_name in reloaded_parts: - # Put the new data in. - self[idx] = reloaded_parts[part_name] - - return self + return xcollections.multi_reload(self) def modify(self, changes): """Modify all Partitions in a Collection. @@ -189,16 +146,16 @@ cdef class Partitions(list): >>> # Apply the changes to all the partitions >>> parts.modify(changes) """ - for part in self: + for part in self.values(): part.modify(changes) @property def total_cpus(self): - return _sum_prop(self, Partition.total_cpus) + return xcollections.sum_property(self, Partition.total_cpus) @property def total_nodes(self): - return _sum_prop(self, Partition.total_nodes) + return xcollections.sum_property(self, Partition.total_nodes) cdef class Partition: @@ -228,6 +185,9 @@ cdef class Partition: def __dealloc__(self): self._dealloc_impl() + def __repr__(self): + return f'{self.__class__.__name__}({self.name})' + @staticmethod cdef Partition from_ptr(partition_info_t *in_ptr): cdef Partition wrap = Partition.__new__(Partition) @@ -243,6 +203,9 @@ cdef class Partition: return self.name def as_dict(self): + return self.to_dict() + + def to_dict(self): """Partition information formatted as a dictionary. 
Returns: @@ -251,7 +214,7 @@ cdef class Partition: Examples: >>> import pyslurm >>> mypart = pyslurm.Partition.load("mypart") - >>> mypart_dict = mypart.as_dict() + >>> mypart_dict = mypart.to_dict() """ return instance_to_dict(self) @@ -274,11 +237,11 @@ cdef class Partition: >>> import pyslurm >>> part = pyslurm.Partition.load("normal") """ - partitions = Partitions.load().as_dict() - if name not in partitions: + part = Partitions.load().get(name) + if not part: raise RPCError(msg=f"Partition '{name}' doesn't exist") - return partitions[name] + return part def create(self): """Create a Partition. @@ -341,7 +304,6 @@ cdef class Partition: """ cdef delete_part_msg_t del_part_msg memset(&del_part_msg, 0, sizeof(del_part_msg)) - del_part_msg.name = cstr.from_unicode(self._error_or_name()) verify_rpc(slurm_delete_partition(&del_part_msg)) @@ -357,6 +319,10 @@ cdef class Partition: def name(self): return cstr.to_unicode(self.ptr.name) + @property + def _id(self): + return self.name + @name.setter def name(self, val): cstr.fmalloc(&self.ptr.name, val) @@ -546,11 +512,11 @@ cdef class Partition: self.ptr.min_nodes = u32(val, zero_is_noval=False) @property - def max_time_limit(self): + def max_time(self): return _raw_time(self.ptr.max_time, on_inf=UNLIMITED) - @max_time_limit.setter - def max_time_limit(self, val): + @max_time.setter + def max_time(self, val): self.ptr.max_time = timestr_to_mins(val) @property diff --git a/pyslurm/db/__init__.py b/pyslurm/db/__init__.py index 0e78a734..acd36a40 100644 --- a/pyslurm/db/__init__.py +++ b/pyslurm/db/__init__.py @@ -42,4 +42,3 @@ Association, AssociationFilter, ) -from . 
import cluster diff --git a/pyslurm/db/assoc.pxd b/pyslurm/db/assoc.pxd index 12a0cde1..384dbb0a 100644 --- a/pyslurm/db/assoc.pxd +++ b/pyslurm/db/assoc.pxd @@ -49,12 +49,13 @@ from pyslurm.db.connection cimport Connection from pyslurm.utils cimport cstr from pyslurm.utils.uint cimport * from pyslurm.db.qos cimport QualitiesOfService, _set_qos_list +from pyslurm.xcollections cimport MultiClusterMap cdef _parse_assoc_ptr(Association ass) cdef _create_assoc_ptr(Association ass, conn=*) -cdef class Associations(list): +cdef class Associations(MultiClusterMap): pass @@ -69,8 +70,8 @@ cdef class AssociationFilter: cdef class Association: cdef: slurmdb_assoc_rec_t *ptr - dict qos_data - dict tres_data + QualitiesOfService qos_data + TrackableResources tres_data cdef public: group_tres diff --git a/pyslurm/db/assoc.pyx b/pyslurm/db/assoc.pyx index d1ac4789..4e535a46 100644 --- a/pyslurm/db/assoc.pyx +++ b/pyslurm/db/assoc.pyx @@ -25,47 +25,22 @@ from pyslurm.core.error import RPCError from pyslurm.utils.helpers import ( instance_to_dict, - collection_to_dict, - group_collection_by_cluster, user_to_uid, ) from pyslurm.utils.uint import * from pyslurm.db.connection import _open_conn_or_error -from pyslurm.db.cluster import LOCAL_CLUSTER +from pyslurm.settings import LOCAL_CLUSTER +from pyslurm import xcollections -cdef class Associations(list): +cdef class Associations(MultiClusterMap): - def __init__(self): - pass - - def as_dict(self, recursive=False, group_by_cluster=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. - group_by_cluster (bool, optional): - By default, only the Jobs from your local Cluster are - returned. 
If this is set to `True`, then all the Jobs in the - collection will be grouped by the Cluster - with the name of - the cluster as the key and the value being the collection as - another dict. - - Returns: - (dict): Collection as a dict. - """ - col = collection_to_dict(self, identifier=Association.id, - recursive=recursive) - if not group_by_cluster: - return col.get(LOCAL_CLUSTER, {}) - - return col - - def group_by_cluster(self): - return group_collection_by_cluster(self) + def __init__(self, assocs=None): + super().__init__(data=assocs, + typ="Associations", + val_type=Association, + id_attr=Association.id, + key_type=int) @staticmethod def load(AssociationFilter db_filter=None, Connection db_connection=None): @@ -76,8 +51,8 @@ cdef class Associations(list): SlurmList assoc_data SlurmListItem assoc_ptr Connection conn - dict qos_data - dict tres_data + QualitiesOfService qos_data + TrackableResources tres_data # Prepare SQL Filter if not db_filter: @@ -96,10 +71,10 @@ cdef class Associations(list): # Fetch other necessary dependencies needed for translating some # attributes (i.e QoS IDs to its name) - qos_data = QualitiesOfService.load(db_connection=conn).as_dict( - name_is_key=False) - tres_data = TrackableResources.load(db_connection=conn).as_dict( - name_is_key=False) + qos_data = QualitiesOfService.load(db_connection=conn, + name_is_key=False) + tres_data = TrackableResources.load(db_connection=conn, + name_is_key=False) # Setup Association objects for assoc_ptr in SlurmList.iter_and_pop(assoc_data): @@ -107,7 +82,11 @@ cdef class Associations(list): assoc.qos_data = qos_data assoc.tres_data = tres_data _parse_assoc_ptr(assoc) - out.append(assoc) + + cluster = assoc.cluster + if cluster not in out.data: + out.data[cluster] = {} + out.data[cluster][assoc.id] = assoc return out @@ -226,13 +205,16 @@ cdef class Association: slurmdb_init_assoc_rec(self.ptr, 0) + def __repr__(self): + return f'{self.__class__.__name__}({self.id})' + @staticmethod cdef 
Association from_ptr(slurmdb_assoc_rec_t *in_ptr): cdef Association wrap = Association.__new__(Association) wrap.ptr = in_ptr return wrap - def as_dict(self): + def to_dict(self): """Database Association information formatted as a dictionary. Returns: @@ -408,8 +390,8 @@ cdef class Association: cdef _parse_assoc_ptr(Association ass): cdef: - dict tres = ass.tres_data - dict qos = ass.qos_data + TrackableResources tres = ass.tres_data + QualitiesOfService qos = ass.qos_data ass.group_tres = TrackableResourceLimits.from_ids( ass.ptr.grp_tres, tres) diff --git a/pyslurm/db/cluster.pxd b/pyslurm/db/cluster.pxd deleted file mode 100644 index 30acdbde..00000000 --- a/pyslurm/db/cluster.pxd +++ /dev/null @@ -1,27 +0,0 @@ -######################################################################### -# cluster.pxd - pyslurm slurmdbd cluster api -######################################################################### -# Copyright (C) 2023 Toni Harzendorf -# -# This file is part of PySlurm -# -# PySlurm is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. - -# PySlurm is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with PySlurm; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-# -# cython: c_string_type=unicode, c_string_encoding=default -# cython: language_level=3 - - -from pyslurm cimport slurm -from pyslurm.utils cimport cstr diff --git a/pyslurm/db/job.pxd b/pyslurm/db/job.pxd index fc395943..bf21c003 100644 --- a/pyslurm/db/job.pxd +++ b/pyslurm/db/job.pxd @@ -53,6 +53,7 @@ from pyslurm.db.connection cimport Connection from pyslurm.utils cimport cstr from pyslurm.db.qos cimport QualitiesOfService from pyslurm.db.tres cimport TrackableResources, TrackableResource +from pyslurm.xcollections cimport MultiClusterMap cdef class JobFilter: @@ -150,8 +151,8 @@ cdef class JobFilter: with_env -cdef class Jobs(list): - """A collection of [pyslurm.db.Job][] objects.""" +cdef class Jobs(MultiClusterMap): + """A [`Multi Cluster`][pyslurm.xcollections.MultiClusterMap] collection of [pyslurm.db.Job][] objects.""" pass @@ -161,6 +162,8 @@ cdef class Job: Args: job_id (int, optional=0): An Integer representing a Job-ID. + cluster (str, optional=None): + Name of the Cluster for this Job. 
Other Parameters: admin_comment (str): @@ -283,7 +286,7 @@ cdef class Job: """ cdef: slurmdb_job_rec_t *ptr - dict qos_data + QualitiesOfService qos_data cdef public: JobSteps steps diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index 636e1137..905f206a 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -27,7 +27,8 @@ from pyslurm.core.error import RPCError, PyslurmError from pyslurm.core import slurmctld from typing import Any from pyslurm.utils.uint import * -from pyslurm.db.cluster import LOCAL_CLUSTER +from pyslurm.settings import LOCAL_CLUSTER +from pyslurm import xcollections from pyslurm.utils.ctime import ( date_to_timestamp, timestr_to_mins, @@ -40,8 +41,6 @@ from pyslurm.utils.helpers import ( uid_to_name, nodelist_to_range_str, instance_to_dict, - collection_to_dict, - group_collection_by_cluster, _get_exit_code, ) from pyslurm.db.connection import _open_conn_or_error @@ -80,7 +79,7 @@ cdef class JobFilter: qos_data = QualitiesOfService.load() for user_input in self.qos: found = False - for qos in qos_data: + for qos in qos_data.values(): if (qos.id == user_input or qos.name == user_input or qos == user_input): @@ -189,54 +188,14 @@ cdef class JobFilter: JobSearchFilter = JobFilter -cdef class Jobs(list): +cdef class Jobs(MultiClusterMap): def __init__(self, jobs=None): - if isinstance(jobs, list): - for job in jobs: - if isinstance(job, int): - self.append(Job(job)) - else: - self.append(job) - elif isinstance(jobs, str): - joblist = jobs.split(",") - self.extend([Job(job) for job in joblist]) - elif isinstance(jobs, dict): - self.extend([job for job in jobs.values()]) - elif jobs is not None: - raise TypeError("Invalid Type: {type(jobs)}") - - def as_dict(self, recursive=False, group_by_cluster=False): - """Convert the collection data to a dict. - - Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. 
- group_by_cluster (bool, optional): - By default, only the Jobs from your local Cluster are - returned. If this is set to `True`, then all the Jobs in the - collection will be grouped by the Cluster - with the name of - the cluster as the key and the value being the collection as - another dict. - - Returns: - (dict): Collection as a dict. - """ - col = collection_to_dict(self, identifier=Job.id, recursive=recursive) - if not group_by_cluster: - return col.get(LOCAL_CLUSTER, {}) - - return col - - def group_by_cluster(self): - """Group Jobs by cluster name - - Returns: - (dict[str, Jobs]): Jobs grouped by cluster. - """ - return group_collection_by_cluster(self) + super().__init__(data=jobs, + typ="Jobs", + val_type=Job, + id_attr=Job.id, + key_type=int) @staticmethod def load(JobFilter db_filter=None, Connection db_connection=None): @@ -280,7 +239,7 @@ cdef class Jobs(list): SlurmList job_data SlurmListItem job_ptr Connection conn - dict qos_data + QualitiesOfService qos_data # Prepare SQL Filter if not db_filter: @@ -297,15 +256,14 @@ cdef class Jobs(list): # Fetch other necessary dependencies needed for translating some # attributes (i.e QoS IDs to its name) - qos_data = QualitiesOfService.load(db_connection=conn).as_dict( - name_is_key=False) + qos_data = QualitiesOfService.load(db_connection=conn, + name_is_key=False) # TODO: also get trackable resources with slurmdb_tres_get and store # it in each job instance. tres_alloc_str and tres_req_str only # contain the numeric tres ids, but it probably makes more sense to # convert them to its type name for the user in advance. - # TODO: For multi-cluster support, remove duplicate federation jobs # TODO: How to handle the possibility of duplicate job ids that could # appear if IDs on a cluster are resetted? 
for job_ptr in SlurmList.iter_and_pop(job_data): @@ -313,7 +271,11 @@ cdef class Jobs(list): job.qos_data = qos_data job._create_steps() JobStatistics._sum_step_stats_for_job(job, job.steps) - out.append(job) + + cluster = job.cluster + if cluster not in out.data: + out.data[cluster] = {} + out[cluster][job.id] = job return out @@ -363,7 +325,7 @@ cdef class Jobs(list): >>> changes = pyslurm.db.Job(comment="A comment for the job") >>> modified_jobs = pyslurm.db.Jobs.modify(db_filter, changes) >>> print(modified_jobs) - >>> [9999] + [9999] In the above example, the changes will be automatically committed if successful. @@ -380,7 +342,7 @@ cdef class Jobs(list): >>> >>> # Now you can first examine which Jobs have been modified >>> print(modified_jobs) - >>> [9999] + [9999] >>> # And then you can actually commit (or even rollback) the >>> # changes >>> db_conn.commit() @@ -420,7 +382,7 @@ cdef class Jobs(list): # # " submitted at " # - # We are just interest in the Job-ID, so extract it + # We are just interested in the Job-ID, so extract it job_id = response_str.split(" ")[0] if job_id and job_id.isdigit(): out.append(int(job_id)) @@ -444,10 +406,14 @@ cdef class Job: def __cinit__(self): self.ptr = NULL - def __init__(self, job_id=0, cluster=LOCAL_CLUSTER, **kwargs): + def __init__(self, job_id=0, cluster=None, **kwargs): self._alloc_impl() self.ptr.jobid = int(job_id) - cstr.fmalloc(&self.ptr.cluster, cluster) + cstr.fmalloc(&self.ptr.cluster, + LOCAL_CLUSTER if not cluster else cluster) + self.qos_data = QualitiesOfService() + self.steps = JobSteps() + self.stats = JobStatistics() for k, v in kwargs.items(): setattr(self, k, v) @@ -471,12 +437,20 @@ cdef class Job: return wrap @staticmethod - def load(job_id, cluster=LOCAL_CLUSTER, with_script=False, with_env=False): + def load(job_id, cluster=None, with_script=False, with_env=False): """Load the information for a specific Job from the Database. Args: job_id (int): ID of the Job to be loaded. 
+ cluster (str): + Name of the Cluster to search in. + with_script (bool): + Whether the Job-Script should also be loaded. Mutually + exclusive with `with_env`. + with_env (bool): + Whether the Job Environment should also be loaded. Mutually + exclusive with `with_script`. Returns: (pyslurm.db.Job): Returns a new Database Job instance @@ -489,24 +463,24 @@ cdef class Job: >>> import pyslurm >>> db_job = pyslurm.db.Job.load(10000) - In the above example, attribute like "script" and "environment" + In the above example, attributes like `script` and `environment` are not populated. You must explicitly request one of them to be loaded: >>> import pyslurm >>> db_job = pyslurm.db.Job.load(10000, with_script=True) >>> print(db_job.script) - """ + cluster = LOCAL_CLUSTER if not cluster else cluster jfilter = JobFilter(ids=[int(job_id)], clusters=[cluster], with_script=with_script, with_env=with_env) - jobs = Jobs.load(jfilter) - if not jobs: + job = Jobs.load(jfilter).get((cluster, int(job_id))) + if not job: raise RPCError(msg=f"Job {job_id} does not exist on " f"Cluster {cluster}") # TODO: There might be multiple entries when job ids were reset. - return jobs[0] + return job def _create_steps(self): cdef: @@ -520,7 +494,10 @@ cdef class Job: self.steps[step.id] = step def as_dict(self): - """Database Job information formatted as a dictionary. + return self.to_dict() + + def to_dict(self): + """Convert Database Job information to a dictionary. 
Returns: (dict): Database Job information as dict @@ -528,20 +505,23 @@ cdef class Job: Examples: >>> import pyslurm >>> myjob = pyslurm.db.Job.load(10000) - >>> myjob_dict = myjob.as_dict() + >>> myjob_dict = myjob.to_dict() """ cdef dict out = instance_to_dict(self) if self.stats: - out["stats"] = self.stats.as_dict() + out["stats"] = self.stats.to_dict() steps = out.pop("steps", {}) out["steps"] = {} for step_id, step in steps.items(): - out["steps"][step_id] = step.as_dict() + out["steps"][step_id] = step.to_dict() return out + def __repr__(self): + return f'{self.__class__.__name__}({self.id})' + def modify(self, changes, db_connection=None): """Modify a Slurm database Job. diff --git a/pyslurm/db/qos.pxd b/pyslurm/db/qos.pxd index 9cb3df86..ea0fde2d 100644 --- a/pyslurm/db/qos.pxd +++ b/pyslurm/db/qos.pxd @@ -44,7 +44,7 @@ from pyslurm.utils cimport cstr cdef _set_qos_list(List *in_list, vals, QualitiesOfService data) -cdef class QualitiesOfService(list): +cdef class QualitiesOfService(dict): pass diff --git a/pyslurm/db/qos.pyx b/pyslurm/db/qos.pyx index a01ef9b0..299c0ed9 100644 --- a/pyslurm/db/qos.pyx +++ b/pyslurm/db/qos.pyx @@ -23,41 +23,26 @@ # cython: language_level=3 from pyslurm.core.error import RPCError -from pyslurm.utils.helpers import instance_to_dict, collection_to_dict_global +from pyslurm.utils.helpers import instance_to_dict from pyslurm.db.connection import _open_conn_or_error -cdef class QualitiesOfService(list): +cdef class QualitiesOfService(dict): def __init__(self): pass - def as_dict(self, recursive=False, name_is_key=True): - """Convert the collection data to a dict. + @staticmethod + def load(QualityOfServiceFilter db_filter=None, + Connection db_connection=None, name_is_key=True): + """Load QoS data from the Database Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. 
name_is_key (bool, optional): By default, the keys in this dict are the names of each QoS. If this is set to `False`, then the unique ID of the QoS will be used as dict keys. - - Returns: - (dict): Collection as a dict. """ - identifier = QualityOfService.name - if not name_is_key: - identifier = QualityOfService.id - - return collection_to_dict_global(self, identifier=identifier, - recursive=recursive) - - @staticmethod - def load(QualityOfServiceFilter db_filter=None, - Connection db_connection=None): cdef: QualitiesOfService out = QualitiesOfService() QualityOfService qos @@ -83,7 +68,8 @@ cdef class QualitiesOfService(list): # Setup QOS objects for qos_ptr in SlurmList.iter_and_pop(qos_data): qos = QualityOfService.from_ptr(qos_ptr.data) - out.append(qos) + _id = qos.name if name_is_key else qos.id + out[_id] = qos return out @@ -170,7 +156,10 @@ cdef class QualityOfService: wrap.ptr = in_ptr return wrap - def as_dict(self): + def __repr__(self): + return f'{self.__class__.__name__}({self.name})' + + def to_dict(self): """Database QualityOfService information formatted as a dictionary. Returns: @@ -195,11 +184,11 @@ cdef class QualityOfService: sucessful. 
""" qfilter = QualityOfServiceFilter(names=[name]) - qos_data = QualitiesOfService.load(qfilter) - if not qos_data: + qos = QualitiesOfService.load(qfilter).get(name) + if not qos: raise RPCError(msg=f"QualityOfService {name} does not exist") - return qos_data[0] + return qos @property def name(self): @@ -227,7 +216,7 @@ def _qos_names_to_ids(qos_list, QualitiesOfService data): def _validate_qos_single(qid, QualitiesOfService data): - for item in data: + for item in data.values(): if qid == item.id or qid == item.name: return item.id diff --git a/pyslurm/db/stats.pyx b/pyslurm/db/stats.pyx index 3ae0c8b5..7bbb2a8a 100644 --- a/pyslurm/db/stats.pyx +++ b/pyslurm/db/stats.pyx @@ -47,7 +47,7 @@ cdef class JobStatistics: self.min_cpu_time_node = None self.min_cpu_time_task = None - def as_dict(self): + def to_dict(self): return instance_to_dict(self) @staticmethod diff --git a/pyslurm/db/step.pxd b/pyslurm/db/step.pxd index aef7120b..ab0ff70c 100644 --- a/pyslurm/db/step.pxd +++ b/pyslurm/db/step.pxd @@ -44,7 +44,7 @@ from pyslurm.db.tres cimport TrackableResources, TrackableResource cdef class JobSteps(dict): - """A collection of [pyslurm.db.JobStep][] objects""" + """A [dict][] of [pyslurm.db.JobStep][] objects""" pass diff --git a/pyslurm/db/step.pyx b/pyslurm/db/step.pyx index fa4ab8bb..e39af066 100644 --- a/pyslurm/db/step.pyx +++ b/pyslurm/db/step.pyx @@ -57,9 +57,14 @@ cdef class JobStep: wrap.stats = JobStatistics.from_step(wrap) return wrap - def as_dict(self): + def to_dict(self): + """Convert Database JobStep information to a dictionary. 
+ + Returns: + (dict): Database JobStep information as dict + """ cdef dict out = instance_to_dict(self) - out["stats"] = self.stats.as_dict() + out["stats"] = self.stats.to_dict() return out @property diff --git a/pyslurm/db/tres.pxd b/pyslurm/db/tres.pxd index 41ed1b4d..23b44ad2 100644 --- a/pyslurm/db/tres.pxd +++ b/pyslurm/db/tres.pxd @@ -42,7 +42,7 @@ from pyslurm.db.connection cimport Connection cdef find_tres_count(char *tres_str, typ, on_noval=*, on_inf=*) cdef find_tres_limit(char *tres_str, typ) cdef merge_tres_str(char **tres_str, typ, val) -cdef _tres_ids_to_names(char *tres_str, dict tres_data) +cdef _tres_ids_to_names(char *tres_str, TrackableResources tres_data) cdef _set_tres_limits(char **dest, TrackableResourceLimits src, TrackableResources tres_data) @@ -62,14 +62,14 @@ cdef class TrackableResourceLimits: license @staticmethod - cdef from_ids(char *tres_id_str, dict tres_data) + cdef from_ids(char *tres_id_str, TrackableResources tres_data) cdef class TrackableResourceFilter: cdef slurmdb_tres_cond_t *ptr -cdef class TrackableResources(list): +cdef class TrackableResources(dict): cdef public raw_str @staticmethod diff --git a/pyslurm/db/tres.pyx b/pyslurm/db/tres.pyx index df93dda0..78195654 100644 --- a/pyslurm/db/tres.pyx +++ b/pyslurm/db/tres.pyx @@ -25,7 +25,7 @@ from pyslurm.utils.uint import * from pyslurm.constants import UNLIMITED from pyslurm.core.error import RPCError -from pyslurm.utils.helpers import instance_to_dict, collection_to_dict_global +from pyslurm.utils.helpers import instance_to_dict from pyslurm.utils import cstr from pyslurm.db.connection import _open_conn_or_error import json @@ -56,7 +56,7 @@ cdef class TrackableResourceLimits: setattr(self, k, v) @staticmethod - cdef from_ids(char *tres_id_str, dict tres_data): + cdef from_ids(char *tres_id_str, TrackableResources tres_data): tres_list = _tres_ids_to_names(tres_id_str, tres_data) if not tres_list: return None @@ -76,7 +76,7 @@ cdef class TrackableResourceLimits: return 
out def _validate(self, TrackableResources tres_data): - id_dict = _tres_names_to_ids(self.as_dict(flatten_limits=True), + id_dict = _tres_names_to_ids(self.to_dict(flatten_limits=True), tres_data) return id_dict @@ -91,7 +91,7 @@ cdef class TrackableResourceLimits: return out - def as_dict(self, flatten_limits=False): + def to_dict(self, flatten_limits=False): cdef dict inst_dict = instance_to_dict(self) if flatten_limits: @@ -134,36 +134,21 @@ cdef class TrackableResourceFilter: self._alloc() -cdef class TrackableResources(list): +cdef class TrackableResources(dict): def __init__(self): pass - def as_dict(self, recursive=False, name_is_key=True): - """Convert the collection data to a dict. + @staticmethod + def load(Connection db_connection=None, name_is_key=True): + """Load Trackable Resources from the Database. Args: - recursive (bool, optional): - By default, the objects will not be converted to a dict. If - this is set to `True`, then additionally all objects are - converted to dicts. name_is_key (bool, optional): By default, the keys in this dict are the names of each TRES. If this is set to `False`, then the unique ID of the TRES will be used as dict keys. - - Returns: - (dict): Collection as a dict. 
""" - identifier = TrackableResource.type_and_name - if not name_is_key: - identifier = TrackableResource.id - - return collection_to_dict_global(self, identifier=identifier, - recursive=recursive) - - @staticmethod - def load(Connection db_connection=None): cdef: TrackableResources out = TrackableResources() TrackableResource tres @@ -188,7 +173,8 @@ cdef class TrackableResources(list): for tres_ptr in SlurmList.iter_and_pop(tres_data): tres = TrackableResource.from_ptr( tres_ptr.data) - out.append(tres) + _id = tres.type_and_name if name_is_key else tres.id + out[_id] = tres return out @@ -246,7 +232,7 @@ cdef class TrackableResource: wrap.ptr = in_ptr return wrap - def as_dict(self): + def to_dict(self): return instance_to_dict(self) @property @@ -307,7 +293,7 @@ cdef merge_tres_str(char **tres_str, typ, val): cstr.from_dict(tres_str, current) -cdef _tres_ids_to_names(char *tres_str, dict tres_data): +cdef _tres_ids_to_names(char *tres_str, TrackableResources tres_data): if not tres_str: return None @@ -342,7 +328,7 @@ def _tres_names_to_ids(dict tres_dict, TrackableResources tres_data): def _validate_tres_single(tid, TrackableResources tres_data): - for tres in tres_data: + for tres in tres_data.values(): if tid == tres.id or tid == tres.type_and_name: return tres.id diff --git a/pyslurm/db/cluster.pyx b/pyslurm/settings.pyx similarity index 92% rename from pyslurm/db/cluster.pyx rename to pyslurm/settings.pyx index 436183a8..5085a9f5 100644 --- a/pyslurm/db/cluster.pyx +++ b/pyslurm/settings.pyx @@ -1,5 +1,5 @@ ######################################################################### -# cluster.pyx - pyslurm slurmdbd cluster api +# settings.pyx - pyslurm global settings ######################################################################### # Copyright (C) 2023 Toni Harzendorf # @@ -23,6 +23,8 @@ # cython: language_level=3 from pyslurm.core import slurmctld +from pyslurm cimport slurm +from pyslurm.utils cimport cstr LOCAL_CLUSTER = 
cstr.to_unicode(slurm.slurm_conf.cluster_name) diff --git a/pyslurm/utils/helpers.pyx b/pyslurm/utils/helpers.pyx index fb1d2201..9fcd5896 100644 --- a/pyslurm/utils/helpers.pyx +++ b/pyslurm/utils/helpers.pyx @@ -341,60 +341,6 @@ def instance_to_dict(inst): return out -def collection_to_dict(collection, identifier, recursive=False, group_id=None): - cdef dict out = {} - - for item in collection: - cluster = item.cluster - if cluster not in out: - out[cluster] = {} - - _id = identifier.__get__(item) - data = item if not recursive else item.as_dict() - - if group_id: - grp_id = group_id.__get__(item) - if grp_id not in out[cluster]: - out[cluster][grp_id] = {} - out[cluster][grp_id].update({_id: data}) - else: - out[cluster][_id] = data - - return out - - -def collection_to_dict_global(collection, identifier, recursive=False): - cdef dict out = {} - for item in collection: - _id = identifier.__get__(item) - out[_id] = item if not recursive else item.as_dict() - return out - - -def group_collection_by_cluster(collection): - cdef dict out = {} - collection_type = type(collection) - - for item in collection: - cluster = item.cluster - if cluster not in out: - out[cluster] = collection_type() - - out[cluster].append(item) - - return out - - -def _sum_prop(obj, name, startval=0): - val = startval - for n in obj.values(): - v = name.__get__(n) - if v is not None: - val += v - - return val - - def _get_exit_code(exit_code): exit_state=sig = 0 if exit_code != slurm.NO_VAL: diff --git a/pyslurm/xcollections.pxd b/pyslurm/xcollections.pxd new file mode 100644 index 00000000..24007da7 --- /dev/null +++ b/pyslurm/xcollections.pxd @@ -0,0 +1,93 @@ +######################################################################### +# collections.pxd - pyslurm custom collections +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or 
modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 + + +cdef class MultiClusterMap: + """Mapping of Multi-Cluster Data for a Collection. + + !!! note "TL;DR" + + If you have no need to write Multi-Cluster capable code and just work + on a single Cluster, Collections inheriting from this Class behave + just like a normal `dict`. + + This class enables collections to hold data from multiple Clusters if + applicable. + For quite a few Entities in Slurm it is possible to gather data from + multiple Clusters. For example, with `squeue`, you can easily list Jobs + running on different Clusters - provided your Cluster is joined in a + Federation or simply part of a multi Cluster Setup. + + Collections like `pyslurm.Jobs` inherit from this Class to enable holding + such data from multiple Clusters. + Internally, the data is structured in a `dict` like this (with + `pyslurm.Jobs` as an example): + + ```python + data = { + "LOCAL_CLUSTER": + 1: pyslurm.Job, + 2: pyslurm.Job, + ... + "OTHER_REMOTE_CLUSTER": + 100: pyslurm.Job, + 101, pyslurm.Job + ... + ... + } + ``` + + When a collection inherits from this class, its functionality will + basically simulate a standard `dict` - with a few extensions to enable + multi-cluster code. 
+ By default, even if your Collections contains Data from multiple Clusters, + any operation will be targeted on the local Cluster data, if available. + + For example, with the data from above: + + ```python + job = data[1] + ``` + + `job` would then hold the instance for Job 1 from the `LOCAL_CLUSTER` + data. + Alternatively, data can also be accessed like this: + + ```python + job = data["OTHER_REMOTE_CLUSTER"][100] + ``` + + Here, you are directly specifying which Cluster data you want to access. + + Similarly, every method (where applicable) from a standard dict is + extended with multi-cluster functionality (check out the examples on the + methods) + """ + cdef public dict data + + cdef: + _typ + _key_type + _val_type + _id_attr diff --git a/pyslurm/xcollections.pyx b/pyslurm/xcollections.pyx new file mode 100644 index 00000000..8be67d29 --- /dev/null +++ b/pyslurm/xcollections.pyx @@ -0,0 +1,581 @@ +######################################################################### +# collections.pyx - pyslurm custom collections +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# cython: c_string_type=unicode, c_string_encoding=default +# cython: language_level=3 +"""Custom Collection utilities""" + +from pyslurm.settings import LOCAL_CLUSTER +import json +from typing import Union, Any + + +class BaseView: + """Base View for all other Views""" + def __init__(self, mcm): + self._mcm = mcm + self._data = mcm.data + + def __len__(self): + return len(self._mcm) + + def __repr__(self): + data = ", ".join(map(repr, self)) + return f'{self.__class__.__name__}([{data}])' + + +class ValuesView(BaseView): + """A simple Value View + + When iterating over an instance of this View, this will yield all values + from all clusters. + """ + def __contains__(self, val): + try: + item = self._mcm.get( + key=self._mcm._item_id(val), + cluster=val.cluster + ) + return item is val or item == val + except AttributeError: + pass + + return False + + def __iter__(self): + for cluster in self._mcm.data.values(): + for item in cluster.values(): + yield item + + +class ClustersView(BaseView): + """A simple Cluster-Keys View + + When iterating over an instance of this View, it will yield all the + Cluster names of the collection. + """ + def __contains__(self, item): + return item in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + yield from self._data + + +class MCKeysView(BaseView): + """A Multi-Cluster Keys View + + Unlike KeysView, when iterating over an MCKeysView instance, this will + yield a 2-tuple in the form `(cluster, key)`. + + Similarly, when checking whether this View contains a Key with the `in` + operator, a 2-tuple must be used in the form described above. 
+ """ + def __contains__(self, item): + cluster, key, = item + return key in self._data[cluster] + + def __iter__(self): + for cluster, keys in self._data.items(): + for key in keys: + yield (cluster, key) + + +class KeysView(BaseView): + """A simple Keys View of a collection + + When iterating, this yields all the keys found from each Cluster in the + collection. Note that unlike the KeysView from a `dict`, the keys here + aren't unique and may appear multiple times. + + If you indeed have multiple Clusters in a collection and need to tell the + keys apart, use the `with_cluster()` function. + """ + def __contains__(self, item): + return item in self._mcm + + def __iter__(self): + for cluster, keys in self._data.items(): + yield from keys + + def with_cluster(self): + """Return a Multi-Cluster Keys View. + + Returns: + (MCKeysView): Multi-Cluster Keys View. + """ + return MCKeysView(self._mcm) + + +class ItemsView(BaseView): + """A simple Items View of a collection. + + Returns a 2-tuple in the form of `(key, value)` when iterating. + + Similarly, when checking whether this View contains an Item with the `in` + operator, a 2-tuple must be used. + """ + def __contains__(self, item): + key, val = item + + try: + out = self._mcm.data[item.cluster][key] + except (KeyError, AttributeError): + return False + else: + return out is val or out == val + + def __iter__(self): + for cluster, data in self._mcm.data.items(): + for key in data: + yield (key, data[key]) + + def with_cluster(self): + """Return a Multi-Cluster Items View. + + Returns: + (MCItemsView): Multi-Cluster Items View. + """ + return MCItemsView(self._mcm) + + +class MCItemsView(BaseView): + """A Multi-Cluster Items View. + + This differs from ItemsView in that it returns a 3-tuple in the form of + `(cluster, key, value)` when iterating. + + Similarly, when checking whether this View contains an Item with the `in` + operator, a 3-tuple must be used. 
+ """ + def __contains__(self, item): + cluster, key, val = item + + try: + out = self._mcm.data[cluster][key] + except KeyError: + return False + else: + return out is val or out == val + + def __iter__(self): + for cluster, data in self._mcm.data.items(): + for key in data: + yield (cluster, key, data[key]) + + +cdef class MultiClusterMap: + + def __init__(self, data, typ=None, val_type=None, + key_type=None, id_attr=None, init_data=True): + self.data = {} if init_data else data + self._typ = typ + self._key_type = key_type + self._val_type = val_type + self._id_attr = id_attr + if init_data: + self._init_data(data) + + def _init_data(self, data): + if isinstance(data, list): + for item in data: + if isinstance(item, self._key_type): + item = self._val_type(item) + if LOCAL_CLUSTER not in self.data: + self.data[LOCAL_CLUSTER] = {} + + self.data[LOCAL_CLUSTER].update({self._item_id(item): item}) + elif isinstance(data, str): + itemlist = data.split(",") + items = {self._key_type(item):self._val_type(item) + for item in itemlist} + self.data[LOCAL_CLUSTER] = items + elif isinstance(data, dict): + self.update(data) + elif data is not None: + raise TypeError(f"Invalid Type: {type(data).__name__}") + + def _check_for_value(self, val_id, cluster): + cluster_data = self.data.get(cluster) + if cluster_data and val_id in cluster_data: + return True + return False + + def _get_cluster(self): + cluster = None + if not self.data or LOCAL_CLUSTER in self.data: + cluster = LOCAL_CLUSTER + else: + try: + cluster = next(iter(self.keys())) + except StopIteration: + raise KeyError("Collection is Empty") from None + + return cluster + + def _get_key_and_cluster(self, item): + if isinstance(item, self._val_type): + cluster, key = item.cluster, self._item_id(item) + elif isinstance(item, tuple) and len(item) == 2: + cluster, key = item + else: + cluster, key = self._get_cluster(), item + + return cluster, key + + def _check_val_type(self, item): + if not isinstance(item, 
self._val_type):
+            raise TypeError(f"Invalid Type: {type(item).__name__}. "
+                            f"{self._val_type.__name__} is required.")
+
+    def _item_id(self, item):
+        return self._id_attr.__get__(item)
+
+    def __eq__(self, other):
+        if isinstance(other, self.__class__):
+            return self.data == other.data
+        return NotImplemented
+
+    def __getitem__(self, item):
+        if item in self.data:
+            return self.data[item]
+
+        cluster, key = self._get_key_and_cluster(item)
+        return self.data[cluster][key]
+
+    def __setitem__(self, where, item):
+        if where in self.data:
+            self.data[where] = item
+        else:
+            cluster, key = self._get_key_and_cluster(where)
+            self.data[cluster][key] = item
+
+    def __delitem__(self, item):
+        if item in self.data:
+            del self.data[item]
+        else:
+            cluster, key = self._get_key_and_cluster(item)
+            del self.data[cluster][key]
+
+    def __len__(self):
+        return sum(len(data) for data in self.data.values())
+
+    def __repr__(self):
+        return f'{self._typ}([{", ".join(map(repr, self.values()))}])'
+
+    def __contains__(self, item):
+        if isinstance(item, self._val_type):
+            item = (item.cluster, self._item_id(item))
+            return self.get(item, default=None) is not None
+            # return self._check_for_value(self._item_id(item), item.cluster)
+        elif isinstance(item, self._key_type):
+            found = False
+            for cluster, data in self.data.items():
+                if item in data:
+                    found = True
+            return found
+        elif isinstance(item, tuple):
+            return self.get(item, default=None) is not None
+            # return self._check_for_value(item, cluster)
+
+        return False
+
+    def __iter__(self):
+        return iter(self.keys())
+
+    def __bool__(self):
+        return bool(self.data)
+
+    def __copy__(self):
+        return self.copy()
+
+    def copy(self):
+        """Return a Copy of this instance."""
+        out = self.__class__.__new__(self.__class__)
+        super(self.__class__, out).__init__(
+            data=self.data.copy(),
+            typ=self._typ,
+            key_type=self._key_type,
+            val_type=self._val_type,
+            init_data=False,
+        )
+        return out
+
+    def get(self, key, default=None):
+        """Get the 
specific value for a Key + + This behaves like `dict`'s `get` method, with the difference that you + can additionally pass in a 2-tuple in the form of `(cluster, key)` as + the key, which can be helpful if this collection contains data from + multiple Clusters. + + If just a key without notion of the Cluster is given, access to the + local cluster data is implied. If this collection does however not + contain data from the local cluster, the first cluster detected + according to `next(iter(self.keys()))` will be used. + + Examples: + Get a Job from the LOCAL_CLUSTER + + >>> job_id = 1 + >>> job = data.get(job_id) + + Get a Job from another Cluster in the Collection, by providing a + 2-tuple with the cluster identifier: + + >>> job_id = 1 + >>> job = data.get(("REMOTE_CLUSTER", job_id)) + """ + cluster, key = self._get_key_and_cluster(key) + return self.data.get(cluster, {}).get(key, default) + + def add(self, item): + """An Item to add to the collection + + Note that a collection can only hold its specific type. + For example, a collection of `pyslurm.Jobs` can only hold + `pyslurm.Job` objects. Trying to add anything other than the accepted + type will raise a TypeError. + + Args: + item (Any): + Item to add to the collection. + + Raises: + TypeError: When an item with an unexpected type not belonging to + the collection was added. + + Examples: + Add a `pyslurm.Job` instance to the `Jobs` collection. + + >>> data = pyslurm.Jobs() + >>> job = pyslurm.Job(1) + >>> data.add(job) + >>> print(data) + Jobs([Job(1)]) + """ + if item.cluster not in self.data: + self.data[item.cluster] = {} + + self._check_val_type(item) + self.data[item.cluster][self._item_id(item)] = item + + def to_json(self, multi_cluster=False): + """Convert the collection to JSON. 
+
+        Returns:
+            (str): JSON formatted string from `json.dumps()`
+        """
+        data = multi_dict_recursive(self)
+        if multi_cluster:
+            return json.dumps(data)
+        else:
+            cluster = self._get_cluster()
+            return json.dumps(data[cluster])
+
+    def keys(self):
+        """Return a View of all the Keys in this collection
+
+        Returns:
+            (KeysView): View of all Keys
+
+        Examples:
+            Iterate over all Keys from all Clusters:
+
+            >>> for key in collection.keys()
+            ...     print(key)
+
+            Iterate over all Keys from all Clusters with the name of the
+            Cluster additionally provided:
+
+            >>> for cluster, key in collection.keys().with_cluster()
+            ...     print(cluster, key)
+        """
+        return KeysView(self)
+
+    def items(self):
+        """Return a View of all the Items in this collection
+
+        Returns:
+            (ItemsView): View of all Items
+
+        Examples:
+            Iterate over all Items from all Clusters:
+
+            >>> for key, value in collection.items()
+            ...     print(key, value)
+
+            Iterate over all Items from all Clusters with the name of the
+            Cluster additionally provided:
+
+            >>> for cluster, key, value in collection.items().with_cluster()
+            ...     print(cluster, key, value)
+        """
+        return ItemsView(self)
+
+    def values(self):
+        """Return a View of all the Values in this collection
+
+        Returns:
+            (ValuesView): View of all Values
+
+        Examples:
+            Iterate over all Values from all Clusters:
+
+            >>> for value in collection.values()
+            ...     print(value)
+        """
+        return ValuesView(self)
+
+    def clusters(self):
+        """Return a View of all the Clusters in this collection
+
+        Returns:
+            (ClustersView): View of Cluster keys
+
+        Examples:
+            Iterate over all Cluster-Names the Collection contains:
+
+            >>> for cluster in collection.clusters()
+            ...     
print(cluster) + """ + return ClustersView(self) + + def popitem(self): + """Remove and return a `(key, value)` pair as a 2-tuple""" + try: + item = next(iter(self.values())) + except StopIteration: + raise KeyError from None + + key = self._item_id(item) + del self.data[item.cluster][key] + return (key, item) + + def clear(self): + """Clear the collection""" + self.data.clear() + + def pop(self, key, default=None): + """Remove key from the collection and return the value + + This behaves like `dict`'s `pop` method, with the difference that you + can additionally pass in a 2-tuple in the form of `(cluster, key)` as + the key, which can be helpful if this collection contains data from + multiple Clusters. + + If just a key without notion of the Cluster is given, access to the + local cluster data is implied. If this collection does however not + contain data from the local cluster, the first cluster detected + according to `next(iter(self.keys()))` will be used. + """ + item = self.get(key, default=default) + if item is default or item == default: + return default + + cluster = item.cluster + del self.data[cluster][key] + if not self.data[cluster]: + del self.data[cluster] + + return item + + def _update(self, data): + for key in data: + try: + iterator = iter(data[key]) + except TypeError as e: + cluster = self._get_cluster() + if not cluster in self.data: + self.data[cluster] = {} + self.data[cluster].update(data) + break + else: + cluster = key + if not cluster in self.data: + self.data[cluster] = {} + self.data[cluster].update(data[cluster]) +# col = data[cluster] +# if hasattr(col, "keys") and callable(col.keys): +# for k in col.keys(): + +# else: +# for item in col: +# k, v = item + + + def update(self, data={}, **kwargs): + """Update the collection. + + This functions like `dict`'s `update` method. 
+ """ + self._update(data) + self._update(kwargs) + + +def multi_reload(cur, frozen=True): + if not cur: + return cur + + new = cur.__class__.load() + for cluster, item in list(cur.keys().with_cluster()): + if (cluster, item) in new.keys().with_cluster(): + cur[cluster][item] = new.pop(item, cluster) + elif not frozen: + del cur[cluster][item] + + if not frozen: + for cluster, item in new.keys().with_cluster(): + if (cluster, item) not in cur.keys().with_cluster(): + cur[cluster][item] = new[cluster][item] + + return cur + + +def dict_recursive(collection): + cdef dict out = {} + for item_id, item in collection.items(): + if hasattr(item, "to_dict"): + out[item_id] = item.to_dict() + return out + + +def to_json(collection): + return json.dumps(dict_recursive(collection)) + + +def multi_dict_recursive(collection): + cdef dict out = collection.data.copy() + for cluster, data in collection.data.items(): + out[cluster] = dict_recursive(data) + return out + + +def sum_property(collection, prop, startval=0): + out = startval + for item in collection.values(): + data = prop.__get__(item) + if data is not None: + out += data + + return out diff --git a/tests/integration/test_db_job.py b/tests/integration/test_db_job.py index 571ec0d2..1ea59690 100644 --- a/tests/integration/test_db_job.py +++ b/tests/integration/test_db_job.py @@ -49,7 +49,7 @@ def test_parse_all(submit_job): job = submit_job() util.wait() db_job = pyslurm.db.Job.load(job.id) - job_dict = db_job.as_dict() + job_dict = db_job.to_dict() assert job_dict["stats"] assert job_dict["steps"] diff --git a/tests/integration/test_db_qos.py b/tests/integration/test_db_qos.py index 11d9e870..e1cde024 100644 --- a/tests/integration/test_db_qos.py +++ b/tests/integration/test_db_qos.py @@ -38,7 +38,7 @@ def test_load_single(): def test_parse_all(submit_job): qos = pyslurm.db.QualityOfService.load("normal") - qos_dict = qos.as_dict() + qos_dict = qos.to_dict() assert qos_dict assert qos_dict["name"] == qos.name diff --git 
a/tests/integration/test_job.py b/tests/integration/test_job.py index cef42daf..9788af45 100644 --- a/tests/integration/test_job.py +++ b/tests/integration/test_job.py @@ -35,9 +35,7 @@ def test_parse_all(submit_job): job = submit_job() - # Use the as_dict() function to test if parsing works for all - # properties on a simple Job without error. - Job.load(job.id).as_dict() + Job.load(job.id).to_dict() def test_load(submit_job): @@ -150,7 +148,7 @@ def test_get_job_queue(submit_job): # Submit 10 jobs, gather the job_ids in a list job_list = [submit_job() for i in range(10)] - jobs = Jobs.load().as_dict() + jobs = Jobs.load() for job in job_list: # Check to see if all the Jobs we submitted exist assert job.id in jobs diff --git a/tests/integration/test_job_steps.py b/tests/integration/test_job_steps.py index b24409f5..8d13ba9f 100644 --- a/tests/integration/test_job_steps.py +++ b/tests/integration/test_job_steps.py @@ -102,7 +102,7 @@ def test_collection(submit_job): job = submit_job(script=create_job_script_multi_step()) time.sleep(util.WAIT_SECS_SLURMCTLD) - steps = JobSteps.load(job).as_dict() + steps = JobSteps.load(job) assert steps # We have 3 Steps: batch, 0 and 1 @@ -116,7 +116,7 @@ def test_cancel(submit_job): job = submit_job(script=create_job_script_multi_step()) time.sleep(util.WAIT_SECS_SLURMCTLD) - steps = JobSteps.load(job).as_dict() + steps = JobSteps.load(job) assert len(steps) == 3 assert ("batch" in steps and 0 in steps and @@ -125,7 +125,7 @@ def test_cancel(submit_job): steps[0].cancel() time.sleep(util.WAIT_SECS_SLURMCTLD) - steps = JobSteps.load(job).as_dict() + steps = JobSteps.load(job) assert len(steps) == 2 assert ("batch" in steps and 1 in steps) @@ -173,8 +173,5 @@ def test_load_with_wrong_step_id(submit_job): def test_parse_all(submit_job): job = submit_job() - - # Use the as_dict() function to test if parsing works for all - # properties on a simple JobStep without error. 
time.sleep(util.WAIT_SECS_SLURMCTLD) - JobStep.load(job, "batch").as_dict() + JobStep.load(job, "batch").to_dict() diff --git a/tests/integration/test_node.py b/tests/integration/test_node.py index 49a69db2..a1c9f6b6 100644 --- a/tests/integration/test_node.py +++ b/tests/integration/test_node.py @@ -29,7 +29,7 @@ def test_load(): - name = Nodes.load()[0].name + name, _ = Nodes.load().popitem() # Now load the node info node = Node.load(name) @@ -56,7 +56,7 @@ def test_create(): def test_modify(): - node = Node(Nodes.load()[0].name) + _, node = Nodes.load().popitem() node.modify(Node(weight=10000)) assert Node.load(node.name).weight == 10000 @@ -69,4 +69,5 @@ def test_modify(): def test_parse_all(): - Node.load(Nodes.load()[0].name).as_dict() + _, node = Nodes.load().popitem() + assert node.to_dict() diff --git a/tests/integration/test_partition.py b/tests/integration/test_partition.py index 8d7a4de4..712eeaff 100644 --- a/tests/integration/test_partition.py +++ b/tests/integration/test_partition.py @@ -28,7 +28,7 @@ def test_load(): - part = Partitions.load()[0] + name, part = Partitions.load().popitem() assert part.name assert part.state @@ -49,7 +49,7 @@ def test_create_delete(): def test_modify(): - part = Partitions.load()[0] + _, part = Partitions.load().popitem() part.modify(Partition(default_time=120)) assert Partition.load(part.name).default_time == 120 @@ -57,8 +57,8 @@ def test_modify(): part.modify(Partition(default_time="1-00:00:00")) assert Partition.load(part.name).default_time == 24*60 - part.modify(Partition(default_time="UNLIMITED")) - assert Partition.load(part.name).default_time == "UNLIMITED" + part.modify(Partition(max_time="UNLIMITED")) + assert Partition.load(part.name).max_time == "UNLIMITED" part.modify(Partition(state="DRAIN")) assert Partition.load(part.name).state == "DRAIN" @@ -68,23 +68,23 @@ def test_modify(): def test_parse_all(): - Partitions.load()[0].as_dict() + _, part = Partitions.load().popitem() + assert part.to_dict() def 
test_reload(): _partnames = [util.randstr() for i in range(3)] _tmp_parts = Partitions(_partnames) - for part in _tmp_parts: + for part in _tmp_parts.values(): part.create() all_parts = Partitions.load() assert len(all_parts) >= 3 my_parts = Partitions(_partnames[1:]).reload() - print(my_parts) assert len(my_parts) == 2 - for part in my_parts: + for part in my_parts.values(): assert part.state != "UNKNOWN" - for part in _tmp_parts: + for part in _tmp_parts.values(): part.delete() diff --git a/tests/unit/test_collection.py b/tests/unit/test_collection.py new file mode 100644 index 00000000..ccb27779 --- /dev/null +++ b/tests/unit/test_collection.py @@ -0,0 +1,328 @@ +######################################################################### +# test_collection.py - custom collection unit tests +######################################################################### +# Copyright (C) 2023 Toni Harzendorf +# +# This file is part of PySlurm +# +# PySlurm is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# PySlurm is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with PySlurm; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+"""test_collection.py - Unit test custom collection functionality.""" + +import pytest +import pyslurm +from pyslurm.xcollections import sum_property + +LOCAL_CLUSTER = pyslurm.settings.LOCAL_CLUSTER +OTHER_CLUSTER = "other_cluster" + + +class TestMultiClusterMap: + + def _create_collection(self): + data = { + LOCAL_CLUSTER: { + 1: pyslurm.db.Job(1), + 2: pyslurm.db.Job(2), + }, + OTHER_CLUSTER: { + 1: pyslurm.db.Job(1, cluster="other_cluster"), + 10: pyslurm.db.Job(10, cluster="other_cluster"), + } + } + col = pyslurm.db.Jobs() + col.update(data) + return col + + def test_create(self): + jobs = pyslurm.db.Jobs("101,102") + assert len(jobs) == 2 + assert 101 in jobs + assert 102 in jobs + assert jobs[101].id == 101 + assert jobs[102].id == 102 + + jobs = pyslurm.db.Jobs([101, 102]) + assert len(jobs) == 2 + assert 101 in jobs + assert 102 in jobs + assert jobs[101].id == 101 + assert jobs[102].id == 102 + + jobs = pyslurm.db.Jobs( + { + 101: pyslurm.db.Job(101), + 102: pyslurm.db.Job(102), + } + ) + assert len(jobs) == 2 + assert 101 in jobs + assert 102 in jobs + assert jobs[101].id == 101 + assert jobs[102].id == 102 + assert True + + def test_add(self): + col = self._create_collection() + col_len = len(col) + + item = pyslurm.db.Job(20) + col.add(item) + + assert len(col[LOCAL_CLUSTER]) == 3 + assert len(col) == col_len+1 + + item = pyslurm.db.Job(20, cluster=OTHER_CLUSTER) + col.add(item) + + assert len(col[LOCAL_CLUSTER]) == 3 + assert len(col) == col_len+2 + + def test_get(self): + col = self._create_collection() + + item = col.get(1) + assert item is not None + assert isinstance(item, pyslurm.db.Job) + assert item.cluster == LOCAL_CLUSTER + + item = col.get((OTHER_CLUSTER, 1)) + assert item is not None + assert isinstance(item, pyslurm.db.Job) + assert item.cluster == OTHER_CLUSTER + + item = col.get(30) + assert item is None + + def test_keys(self): + col = self._create_collection() + + keys = col.keys() + keys_with_cluster = keys.with_cluster() + assert 
len(keys) == len(col) + + for k in keys: + assert k + + for cluster, k in keys_with_cluster: + assert cluster + assert cluster in col.data + assert k + + def test_values(self): + col = self._create_collection() + values = col.values() + + assert len(values) == len(col) + + for item in values: + assert item + print(item) + assert isinstance(item, pyslurm.db.Job) + assert item.cluster in col.data + + def test_getitem(self): + col = self._create_collection() + + item1 = col[LOCAL_CLUSTER][1] + item2 = col[1] + item3 = col[OTHER_CLUSTER][1] + + assert item1 + assert item2 + assert item3 + assert item1 == item2 + assert item1 != item3 + + with pytest.raises(KeyError): + item = col[30] + + with pytest.raises(KeyError): + item = col[OTHER_CLUSTER][30] + + def test_setitem(self): + col = self._create_collection() + col_len = len(col) + + item = pyslurm.db.Job(30) + col[item.id] = item + assert len(col[LOCAL_CLUSTER]) == 3 + assert len(col) == col_len+1 + + item = pyslurm.db.Job(50, cluster=OTHER_CLUSTER) + col[OTHER_CLUSTER][item.id] = item + assert len(col[OTHER_CLUSTER]) == 3 + assert len(col) == col_len+2 + + item = pyslurm.db.Job(100, cluster=OTHER_CLUSTER) + col[item] = item + assert len(col[OTHER_CLUSTER]) == 4 + assert len(col) == col_len+3 + + item = pyslurm.db.Job(101, cluster=OTHER_CLUSTER) + col[(item.cluster, item.id)] = item + assert len(col[OTHER_CLUSTER]) == 5 + assert len(col) == col_len+4 + + new_other_data = { + 1: pyslurm.db.Job(1), + 2: pyslurm.db.Job(2), + } + col[OTHER_CLUSTER] = new_other_data + assert len(col[OTHER_CLUSTER]) == 2 + assert len(col[LOCAL_CLUSTER]) == 3 + assert 1 in col[OTHER_CLUSTER] + assert 2 in col[OTHER_CLUSTER] + + def test_delitem(self): + col = self._create_collection() + col_len = len(col) + + del col[1] + assert len(col[LOCAL_CLUSTER]) == 1 + assert len(col) == col_len-1 + + del col[OTHER_CLUSTER][1] + assert len(col[OTHER_CLUSTER]) == 1 + assert len(col) == col_len-2 + + del col[OTHER_CLUSTER] + assert len(col) == 1 + 
assert OTHER_CLUSTER not in col.data + + def test_copy(self): + col = self._create_collection() + col_copy = col.copy() + assert col == col_copy + + def test_iter(self): + col = self._create_collection() + for k in col: + assert k + + def test_items(self): + col = self._create_collection() + for k, v in col.items(): + assert k + assert v + assert isinstance(v, pyslurm.db.Job) + + for c, k, v in col.items().with_cluster(): + assert c + assert k + assert v + assert isinstance(v, pyslurm.db.Job) + + def test_popitem(self): + col = self._create_collection() + col_len = len(col) + + key, item = col.popitem() + assert item + assert key + assert isinstance(item, pyslurm.db.Job) + assert len(col) == col_len-1 + + def test_update(self): + col = self._create_collection() + col_len = len(col) + + col_update = { + 30: pyslurm.db.Job(30), + 50: pyslurm.db.Job(50), + } + col.update(col_update) + assert len(col) == col_len+2 + assert len(col[LOCAL_CLUSTER]) == 4 + assert 30 in col + assert 50 in col + + col_update = { + "new_cluster": { + 80: pyslurm.db.Job(80, cluster="new_cluster"), + 50: pyslurm.db.Job(50, cluster="new_cluster"), + } + } + col.update(col_update) + assert len(col) == col_len+4 + assert len(col[LOCAL_CLUSTER]) == 4 + assert len(col["new_cluster"]) == 2 + assert 80 in col + assert 50 in col + + col_update = { + 200: pyslurm.db.Job(200, cluster=OTHER_CLUSTER), + 300: pyslurm.db.Job(300, cluster=OTHER_CLUSTER), + } + col.update({OTHER_CLUSTER: col_update}) + assert len(col) == col_len+6 + assert len(col[OTHER_CLUSTER]) == 4 + assert 200 in col + assert 300 in col + + empty_col = pyslurm.db.Jobs() + empty_col.update(col_update) + assert len(empty_col) == 2 + + def test_pop(self): + col = self._create_collection() + col_len = len(col) + + item = col.pop(1) + assert item + assert item.id == 1 + assert len(col) == col_len-1 + + item = col.pop(999, default="def") + assert item == "def" + + def test_contains(self): + col = self._create_collection() + item = 
pyslurm.db.Job(1) + assert item in col + + assert 10 in col + assert 20 not in col + + assert (OTHER_CLUSTER, 10) in col + assert (LOCAL_CLUSTER, 10) not in col + + def test_to_json(self): + col = self._create_collection() + data = col.to_json(multi_cluster=True) + assert data + + def test_cluster_view(self): + col = self._create_collection() + assert len(col.clusters()) == 2 + for c in col.clusters(): + assert c + + def test_sum_property(self): + class TestObject: + @property + def memory(self): + return 10240 + + @property + def cpus(self): + return None + + object_dict = {i: TestObject() for i in range(10)} + + expected = 10240 * 10 + assert sum_property(object_dict, TestObject.memory) == expected + + expected = 0 + assert sum_property(object_dict, TestObject.cpus) == expected diff --git a/tests/unit/test_common.py b/tests/unit/test_common.py index 1598d191..cf5353b1 100644 --- a/tests/unit/test_common.py +++ b/tests/unit/test_common.py @@ -55,12 +55,11 @@ nodelist_from_range_str, nodelist_to_range_str, instance_to_dict, - collection_to_dict, - collection_to_dict_global, - group_collection_by_cluster, - _sum_prop, ) from pyslurm.utils import cstr +from pyslurm.xcollections import ( + sum_property, +) class TestStrings: @@ -414,75 +413,3 @@ def test_nodelist_to_range_str(self): assert "node[001,007-009]" == nodelist_to_range_str(nodelist) assert "node[001,007-009]" == nodelist_to_range_str(nodelist_str) - def test_summarize_property(self): - class TestObject: - @property - def memory(self): - return 10240 - - @property - def cpus(self): - return None - - object_dict = {i: TestObject() for i in range(10)} - - expected = 10240 * 10 - assert _sum_prop(object_dict, TestObject.memory) == expected - - expected = 0 - assert _sum_prop(object_dict, TestObject.cpus) == expected - - def test_collection_to_dict(self): - class TestObject: - - def __init__(self, _id, _grp_id, cluster): - self._id = _id - self._grp_id = _grp_id - self.cluster = cluster - - @property - def 
id(self): - return self._id - - @property - def group_id(self): - return self._grp_id - - def as_dict(self): - return instance_to_dict(self) - - class TestCollection(list): - - def __init__(self, data): - super().__init__() - self.extend(data) - - OFFSET = 100 - RANGE = 10 - - data = [TestObject(x, x+OFFSET, "TestCluster") for x in range(RANGE)] - collection = TestCollection(data) - - coldict = collection_to_dict(collection, identifier=TestObject.id) - coldict = coldict.get("TestCluster", {}) - - assert len(coldict) == RANGE - for i in range(RANGE): - assert i in coldict - assert isinstance(coldict[i], TestObject) - - coldict = collection_to_dict(collection, identifier=TestObject.id, - group_id=TestObject.group_id) - coldict = coldict.get("TestCluster", {}) - - assert len(coldict) == RANGE - for i in range(RANGE): - assert i+OFFSET in coldict - assert i in coldict[i+OFFSET] - - coldict = collection_to_dict(collection, identifier=TestObject.id, - recursive=True) - coldict = coldict.get("TestCluster", {}) - - for item in coldict.values(): - assert isinstance(item, dict) diff --git a/tests/unit/test_db_job.py b/tests/unit/test_db_job.py index 7b77671f..c2ae8bb0 100644 --- a/tests/unit/test_db_job.py +++ b/tests/unit/test_db_job.py @@ -42,38 +42,11 @@ def test_filter(): job_filter._create() -def test_create_collection(): - jobs = pyslurm.db.Jobs("101,102") - assert len(jobs) == 2 - jobs = jobs.as_dict() - assert 101 in jobs - assert 102 in jobs - assert jobs[101].id == 101 - assert jobs[102].id == 102 - - jobs = pyslurm.db.Jobs([101, 102]) - assert len(jobs) == 2 - jobs = jobs.as_dict() - assert 101 in jobs - assert 102 in jobs - assert jobs[101].id == 101 - assert jobs[102].id == 102 - - jobs = pyslurm.db.Jobs( - { - 101: pyslurm.db.Job(101), - 102: pyslurm.db.Job(102), - } - ) - assert len(jobs) == 2 - jobs = jobs.as_dict() - assert 101 in jobs - assert 102 in jobs - assert jobs[101].id == 101 - assert jobs[102].id == 102 - assert True - - def test_create_instance(): 
job = pyslurm.db.Job(9999) assert job.id == 9999 + + +def test_parse_all(): + job = pyslurm.db.Job(9999) + assert job.to_dict() diff --git a/tests/unit/test_db_qos.py b/tests/unit/test_db_qos.py index 0d2fd538..5ee2db76 100644 --- a/tests/unit/test_db_qos.py +++ b/tests/unit/test_db_qos.py @@ -39,11 +39,6 @@ def test_search_filter(): qos_filter._create() -def test_create_collection_instance(): - # TODO - assert True - - def test_create_instance(): qos = pyslurm.db.QualityOfService("test") assert qos.name == "test" diff --git a/tests/unit/test_job.py b/tests/unit/test_job.py index edcf65d4..863fcfab 100644 --- a/tests/unit/test_job.py +++ b/tests/unit/test_job.py @@ -31,9 +31,7 @@ def test_create_instance(): def test_parse_all(): - # Use the as_dict() function to test if parsing works for all - # properties on a simple Job without error. - Job(9999).as_dict() + assert Job(9999).to_dict() def test_parse_dependencies_to_dict(): diff --git a/tests/unit/test_job_steps.py b/tests/unit/test_job_steps.py index fcd0d012..c8c52352 100644 --- a/tests/unit/test_job_steps.py +++ b/tests/unit/test_job_steps.py @@ -39,6 +39,4 @@ def test_create_instance(): def test_parse_all(): - # Use the as_dict() function to test if parsing works for all - # properties on a simple JobStep without error. 
- JobStep(9999, 1).as_dict() + assert JobStep(9999, 1).to_dict() diff --git a/tests/unit/test_node.py b/tests/unit/test_node.py index 755e85d9..c4dba73e 100644 --- a/tests/unit/test_node.py +++ b/tests/unit/test_node.py @@ -32,35 +32,7 @@ def test_create_instance(): def test_parse_all(): - Node("localhost").as_dict() - - -def test_create_nodes_collection(): - nodes = Nodes("node1,node2").as_dict() - assert len(nodes) == 2 - assert "node1" in nodes - assert "node2" in nodes - assert nodes["node1"].name == "node1" - assert nodes["node2"].name == "node2" - - nodes = Nodes(["node1", "node2"]).as_dict() - assert len(nodes) == 2 - assert "node1" in nodes - assert "node2" in nodes - assert nodes["node1"].name == "node1" - assert nodes["node2"].name == "node2" - - nodes = Nodes( - { - "node1": Node("node1"), - "node2": Node("node2"), - } - ).as_dict() - assert len(nodes) == 2 - assert "node1" in nodes - assert "node2" in nodes - assert nodes["node1"].name == "node1" - assert nodes["node2"].name == "node2" + assert Node("localhost").to_dict() def test_set_node_state(): diff --git a/tests/unit/test_partition.py b/tests/unit/test_partition.py index 89403ae2..b699893c 100644 --- a/tests/unit/test_partition.py +++ b/tests/unit/test_partition.py @@ -31,36 +31,8 @@ def test_create_instance(): assert part.name == "normal" -def test_create_collection(): - parts = Partitions("part1,part2").as_dict() - assert len(parts) == 2 - assert "part1" in parts - assert "part2" in parts - assert parts["part1"].name == "part1" - assert parts["part2"].name == "part2" - - parts = Partitions(["part1", "part2"]).as_dict() - assert len(parts) == 2 - assert "part1" in parts - assert "part2" in parts - assert parts["part1"].name == "part1" - assert parts["part2"].name == "part2" - - parts = Partitions( - { - "part1": Partition("part1"), - "part2": Partition("part2"), - } - ).as_dict() - assert len(parts) == 2 - assert "part1" in parts - assert "part2" in parts - assert parts["part1"].name == "part1" - 
assert parts["part2"].name == "part2" - - def test_parse_all(): - Partition("normal").as_dict() + assert Partition("normal").to_dict() def test_parse_memory(): From fe572c791eb08b3ad6c5bfc3004578dcabc7cc0e Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Tue, 18 Jul 2023 21:00:19 +0200 Subject: [PATCH 30/48] More doc improvements and other fixes (#306) * Improved docs + better custom repr * Add __or__, __ror__ and __ior__ to MultiClusterMap * Use from_ptr method when loading single Job/Step/Node --- CHANGELOG.md | 19 +++--- docs/reference/index.md | 2 +- pyslurm/core/job/job.pxd | 4 +- pyslurm/core/job/job.pyx | 19 ++++-- pyslurm/core/job/step.pyx | 20 ++++-- pyslurm/core/job/submission.pxd | 6 +- pyslurm/core/job/submission.pyx | 18 +++-- pyslurm/core/node.pxd | 12 ++-- pyslurm/core/node.pyx | 17 +++-- pyslurm/core/partition.pyx | 2 +- pyslurm/db/assoc.pyx | 2 +- pyslurm/db/connection.pyx | 8 +++ pyslurm/db/job.pxd | 3 +- pyslurm/db/job.pyx | 29 +++++--- pyslurm/db/qos.pyx | 2 +- pyslurm/db/step.pyx | 10 +++ pyslurm/xcollections.pxd | 32 +++++---- pyslurm/xcollections.pyx | 114 +++++++++++++++++++++----------- tests/unit/test_collection.py | 74 ++++++++++++++++++++- 19 files changed, 284 insertions(+), 109 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f6dd4c6..c50aa1b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,15 +11,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Ability to modify Database Jobs - New classes to interact with the Partition API - - [pyslurm.Partition](https://pyslurm.github.io/23.2/reference/partition/#pyslurm.Partition) - - [pyslurm.Partitions](https://pyslurm.github.io/23.2/reference/partition/#pyslurm.Partitions) + - [pyslurm.Partition][] + - [pyslurm.Partitions][] - New attributes for a Database Job: - - extra - - failed_node -- Now possible to initialize a [pyslurm.db.Jobs][] collection with existing job - ids or pyslurm.db.Job objects -- 
Added `as_dict` function to all Collections + - `extra` + - `failed_node` - Added a new Base Class [MultiClusterMap][pyslurm.xcollections.MultiClusterMap] that some Collections inherit from. +- Added `to_json` function to all Collections ### Fixed @@ -29,9 +27,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - the Job was older than a day ### Changed - -- `JobSearchFilter` has been renamed to `JobFilter` -- Renamed `as_dict` Function of some classes to `to_dict` + +- Improved Docs +- Renamed `JobSearchFilter` to [pyslurm.db.JobFilter][] +- Renamed `as_dict` function of some classes to `to_dict` ## [23.2.1](https://github.com/PySlurm/pyslurm/releases/tag/v23.2.1) - 2023-05-18 diff --git a/docs/reference/index.md b/docs/reference/index.md index 35a6c678..af0ef05e 100644 --- a/docs/reference/index.md +++ b/docs/reference/index.md @@ -25,7 +25,7 @@ The `pyslurm` package is a wrapper around the Slurm C-API it! -## Functionality already reworked: +## Reworked Classes * Job API * [pyslurm.Job][] diff --git a/pyslurm/core/job/job.pxd b/pyslurm/core/job/job.pxd index 4eb89bde..616db4c9 100644 --- a/pyslurm/core/job/job.pxd +++ b/pyslurm/core/job/job.pxd @@ -69,10 +69,10 @@ cdef class Jobs(MultiClusterMap): """A [`Multi Cluster`][pyslurm.xcollections.MultiClusterMap] collection of [pyslurm.Job][] objects. Args: - jobs (Union[list, dict], optional=None): + jobs (Union[list[int], dict[int, pyslurm.Job], str], optional=None): Jobs to initialize this collection with. frozen (bool, optional=False): - Control whether this collection is "frozen" when reloading Job + Control whether this collection is `frozen` when reloading Job information. Attributes: diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index e2915608..8ccc7f66 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -89,6 +89,14 @@ cdef class Jobs(MultiClusterMap): Raises: RPCError: When getting all the Jobs from the slurmctld failed. 
+ + Examples: + >>> import pyslurm + >>> jobs = pyslurm.Jobs.load() + >>> print(jobs) + pyslurm.Jobs({1: pyslurm.Job(1), 2: pyslurm.Job(2)}) + >>> print(jobs[1]) + pyslurm.Job(1) """ cdef: dict passwd = {} @@ -134,6 +142,9 @@ cdef class Jobs(MultiClusterMap): def reload(self): """Reload the information for jobs in a collection. + Returns: + (pyslurm.Partitions): Returns self + Raises: RPCError: When getting the Jobs from the slurmctld failed. """ @@ -203,7 +214,7 @@ cdef class Job: self._dealloc_impl() def __repr__(self): - return f'{self.__class__.__name__}({self.id})' + return f'pyslurm.{self.__class__.__name__}({self.id})' @staticmethod def load(job_id): @@ -233,15 +244,13 @@ cdef class Job: """ cdef: job_info_msg_t *info = NULL - Job wrap = Job.__new__(Job) + Job wrap = None try: verify_rpc(slurm_load_job(&info, job_id, slurm.SHOW_DETAIL)) if info and info.record_count: - # Copy info - wrap._alloc_impl() - memcpy(wrap.ptr, &info.job_array[0], sizeof(slurm_job_info_t)) + wrap = Job.from_ptr(&info.job_array[0]) info.record_count = 0 if not slurm.IS_JOB_PENDING(wrap.ptr): diff --git a/pyslurm/core/job/step.pyx b/pyslurm/core/job/step.pyx index 54cb8f59..4227e901 100644 --- a/pyslurm/core/job/step.pyx +++ b/pyslurm/core/job/step.pyx @@ -59,6 +59,10 @@ cdef class JobSteps(dict): elif steps is not None: raise TypeError("Invalid Type: {type(steps)}") + def __repr__(self): + data = super().__repr__() + return f'pyslurm.{self.__class__.__name__}({data})' + @staticmethod def load(job): """Load the Job Steps from the system. 
@@ -69,6 +73,14 @@ cdef class JobSteps(dict): Returns: (pyslurm.JobSteps): JobSteps of the Job + + Examples: + >>> import pyslurm + >>> steps = pyslurm.JobSteps.load(1) + >>> print(steps) + pyslurm.JobSteps({'batch': pyslurm.JobStep('batch')}) + >>> print(steps[1]) + pyslurm.JobStep('batch') """ cdef: Job _job @@ -187,7 +199,7 @@ cdef class JobStep: JobStep.__dict__[name].__set__(self, val) def __repr__(self): - return f'{self.__class__.__name__}({self.id})' + return f'pyslurm.{self.__class__.__name__}({self.id})' @staticmethod def load(job_id, step_id): @@ -214,7 +226,7 @@ cdef class JobStep: """ cdef: job_step_info_response_msg_t *info = NULL - JobStep wrap = JobStep.__new__(JobStep) + JobStep wrap = None job_id = job_id.id if isinstance(job_id, Job) else job_id rc = slurm_get_job_steps(0, job_id, dehumanize_step_id(step_id), @@ -222,9 +234,7 @@ cdef class JobStep: verify_rpc(rc) if info and info.job_step_count == 1: - # Copy new info - wrap._alloc_impl() - memcpy(wrap.ptr, &info.job_steps[0], sizeof(job_step_info_t)) + wrap = JobStep.from_ptr(&info.job_steps[0]) info.job_step_count = 0 slurm_free_job_step_info_response_msg(info) else: diff --git a/pyslurm/core/job/submission.pxd b/pyslurm/core/job/submission.pxd index fdedc8ed..1005a24e 100644 --- a/pyslurm/core/job/submission.pxd +++ b/pyslurm/core/job/submission.pxd @@ -495,9 +495,9 @@ cdef class JobSubmitDescription: standard_in (str): Path to a File acting as standard_in for the batch-script. This is the same as -i/--input from sbatch. - standard_in (str): - Path to a File acting as standard_in for the batch-script. - This is the same as -i/--input from sbatch. + standard_error (str): + Path to a File acting as standard_error for the batch-script. + This is the same as -e/--error from sbatch. standard_output (str): Path to a File to write the Jobs standard_output. This is the same as -o/--output from sbatch. 
diff --git a/pyslurm/core/job/submission.pyx b/pyslurm/core/job/submission.pyx index df33992b..0c9e699c 100644 --- a/pyslurm/core/job/submission.pyx +++ b/pyslurm/core/job/submission.pyx @@ -73,6 +73,9 @@ cdef class JobSubmitDescription: slurm_init_job_desc_msg(self.ptr) + def __repr__(self): + return f'pyslurm.{self.__class__.__name__}' + def submit(self): """Submit a batch job description. @@ -87,9 +90,12 @@ cdef class JobSubmitDescription: >>> desc = pyslurm.JobSubmitDescription( ... name="test-job", ... cpus_per_task=1, - ... time_limit="10-00:00:00") + ... time_limit="10-00:00:00", + ... script="/path/to/your/submit_script.sh") >>> >>> job_id = desc.submit() + >>> print(job_id) + 99 """ cdef submit_response_msg_t *resp = NULL @@ -112,9 +118,9 @@ cdef class JobSubmitDescription: Args: overwrite (bool): - If set to True, the value from an option found in the + If set to `True`, the value from an option found in the environment will override the current value of the attribute - in this instance. Default is False + in this instance. Default is `False` Examples: Lets consider you want to set the name of the Job, its Account @@ -141,13 +147,13 @@ cdef class JobSubmitDescription: self._parse_env(overwrite) def load_sbatch_options(self, overwrite=False): - """Load values from #SBATCH options in the batch script. + """Load values from `#SBATCH` options in the batch script. Args: overwrite (bool): - If set to True, the value from an option found in the in the + If set to `True`, the value from an option found in the in the batch script will override the current value of the attribute - in this instance. Default is False + in this instance. 
Default is `False` """ if not self.script: raise ValueError("You need to set the 'script' attribute first.") diff --git a/pyslurm/core/node.pxd b/pyslurm/core/node.pxd index 5167de78..d889b723 100644 --- a/pyslurm/core/node.pxd +++ b/pyslurm/core/node.pxd @@ -62,7 +62,7 @@ cdef class Nodes(MultiClusterMap): """A [`Multi Cluster`][pyslurm.xcollections.MultiClusterMap] collection of [pyslurm.Node][] objects. Args: - nodes (Union[list, dict, str], optional=None): + nodes (Union[list[str], dict[str, Node], str], optional=None): Nodes to initialize this collection with. Attributes: @@ -161,8 +161,7 @@ cdef class Node: free_memory (int): Free Memory in Mebibytes on the node. memory_reserved_for_system (int): - Raw Memory in Mebibytes reserved for the System not usable by - Jobs. + Memory in Mebibytes reserved for the System not usable by Jobs. temporary_disk (int): Amount of temporary disk space this node has, in Mebibytes. weight (int): @@ -210,9 +209,10 @@ cdef class Node: external_sensors (dict): External Sensor info for the Node. The dict returned contains the following information: - * joules_total (int) - * current_watts (int) - * temperature (int) + + * `joules_total` (int) + * `current_watts` (int) + * `temperature` (int) state (str): State the node is currently in. next_state (str): diff --git a/pyslurm/core/node.pyx b/pyslurm/core/node.pyx index eac1bfef..5f8c7e2d 100644 --- a/pyslurm/core/node.pyx +++ b/pyslurm/core/node.pyx @@ -121,13 +121,16 @@ cdef class Nodes(MultiClusterMap): return nodes def reload(self): - """Reload the information for nodes in a collection. + """Reload the information for Nodes in a collection. !!! note Only information for nodes which are already in the collection at the time of calling this method will be reloaded. + Returns: + (pyslurm.Nodes): Returns self + Raises: RPCError: When getting the Nodes from the slurmctld failed. 
""" @@ -246,7 +249,7 @@ cdef class Node: Node.__dict__[name].__set__(self, val) def __repr__(self): - return f'{self.__class__.__name__}({self.name})' + return f'pyslurm.{self.__class__.__name__}({self.name})' @staticmethod cdef Node from_ptr(node_info_t *in_ptr): @@ -271,6 +274,10 @@ cdef class Node: Implements the slurm_load_node_single RPC. + Args: + name (str): + The name of the Node to load. + Returns: (pyslurm.Node): Returns a new Node instance. @@ -285,7 +292,7 @@ cdef class Node: cdef: node_info_msg_t *node_info = NULL partition_info_msg_t *part_info = NULL - Node wrap = Node.__new__(Node) + Node wrap = None try: verify_rpc(slurm_load_node_single(&node_info, @@ -294,9 +301,7 @@ cdef class Node: slurm_populate_node_partitions(node_info, part_info) if node_info and node_info.record_count: - # Copy info - wrap._alloc_impl() - memcpy(wrap.info, &node_info.node_array[0], sizeof(node_info_t)) + wrap = Node.from_ptr(&node_info.node_array[0]) node_info.record_count = 0 else: raise RPCError(msg=f"Node '{name}' does not exist") diff --git a/pyslurm/core/partition.pyx b/pyslurm/core/partition.pyx index e1a1b6b1..ba0bf559 100644 --- a/pyslurm/core/partition.pyx +++ b/pyslurm/core/partition.pyx @@ -186,7 +186,7 @@ cdef class Partition: self._dealloc_impl() def __repr__(self): - return f'{self.__class__.__name__}({self.name})' + return f'pyslurm.{self.__class__.__name__}({self.name})' @staticmethod cdef Partition from_ptr(partition_info_t *in_ptr): diff --git a/pyslurm/db/assoc.pyx b/pyslurm/db/assoc.pyx index 4e535a46..93617669 100644 --- a/pyslurm/db/assoc.pyx +++ b/pyslurm/db/assoc.pyx @@ -206,7 +206,7 @@ cdef class Association: slurmdb_init_assoc_rec(self.ptr, 0) def __repr__(self): - return f'{self.__class__.__name__}({self.id})' + return f'pyslurm.db.{self.__class__.__name__}({self.id})' @staticmethod cdef Association from_ptr(slurmdb_assoc_rec_t *in_ptr): diff --git a/pyslurm/db/connection.pyx b/pyslurm/db/connection.pyx index 67ef7603..935f921a 100644 --- 
a/pyslurm/db/connection.pyx +++ b/pyslurm/db/connection.pyx @@ -48,6 +48,10 @@ cdef class Connection: def __dealloc__(self): self.close() + def __repr__(self): + state = "open" if self.is_open else "closed" + return f'pyslurm.db.{self.__class__.__name__} is {state}' + @staticmethod def open(): """Open a new connection to the slurmdbd @@ -61,6 +65,8 @@ cdef class Connection: Examples: >>> import pyslurm >>> connection = pyslurm.db.Connection.open() + >>> print(connection.is_open) + True """ cdef Connection conn = Connection.__new__(Connection) conn.ptr = slurmdb_connection_get(&conn.flags) @@ -77,6 +83,8 @@ cdef class Connection: >>> connection = pyslurm.db.Connection.open() >>> ... >>> connection.close() + >>> print(connection.is_open) + False """ if self.is_open: slurmdb_connection_close(&self.ptr) diff --git a/pyslurm/db/job.pxd b/pyslurm/db/job.pxd index bf21c003..70ef0311 100644 --- a/pyslurm/db/job.pxd +++ b/pyslurm/db/job.pxd @@ -163,7 +163,8 @@ cdef class Job: job_id (int, optional=0): An Integer representing a Job-ID. cluster (str, optional=None): - Name of the Cluster for this Job. + Name of the Cluster for this Job. Default is the name of the local + Cluster. Other Parameters: admin_comment (str): diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index 905f206a..6679a77c 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -192,7 +192,7 @@ cdef class Jobs(MultiClusterMap): def __init__(self, jobs=None): super().__init__(data=jobs, - typ="Jobs", + typ="db.Jobs", val_type=Job, id_attr=Job.id, key_type=int) @@ -208,7 +208,8 @@ cdef class Jobs(MultiClusterMap): A search filter that the slurmdbd will apply when retrieving Jobs from the database. db_connection (pyslurm.db.Connection): - An open database connection. + An open database connection. By default if none is specified, + one will be opened automatically. Returns: (pyslurm.db.Jobs): A Collection of database Jobs. 
@@ -223,6 +224,10 @@ cdef class Jobs(MultiClusterMap): >>> import pyslurm >>> db_jobs = pyslurm.db.Jobs.load() + >>> print(db_jobs) + pyslurm.db.Jobs({1: pyslurm.db.Job(1), 2: pyslurm.db.Job(2)}) + >>> print(db_jobs[1]) + pyslurm.db.Job(1) Now with a Job Filter, so only Jobs that have specific Accounts are returned: @@ -339,13 +344,20 @@ cdef class Jobs(MultiClusterMap): >>> changes = pyslurm.db.Job(comment="A comment for the job") >>> modified_jobs = pyslurm.db.Jobs.modify( ... db_filter, changes, db_conn) - >>> - >>> # Now you can first examine which Jobs have been modified + + Now you can first examine which Jobs have been modified: + >>> print(modified_jobs) [9999] - >>> # And then you can actually commit (or even rollback) the - >>> # changes + + And then you can actually commit the changes: + >>> db_conn.commit() + + You can also explicitly rollback these changes instead of + committing, so they will not become active: + + >>> db_conn.rollback() """ cdef: JobFilter cond @@ -444,7 +456,8 @@ cdef class Job: job_id (int): ID of the Job to be loaded. cluster (str): - Name of the Cluster to search in. + Name of the Cluster to search in. Default is the local + Cluster. with_script (bool): Whether the Job-Script should also be loaded. Mutually exclusive with `with_env`. @@ -520,7 +533,7 @@ cdef class Job: return out def __repr__(self): - return f'{self.__class__.__name__}({self.id})' + return f'pyslurm.db.{self.__class__.__name__}({self.id})' def modify(self, changes, db_connection=None): """Modify a Slurm database Job. diff --git a/pyslurm/db/qos.pyx b/pyslurm/db/qos.pyx index 299c0ed9..09819611 100644 --- a/pyslurm/db/qos.pyx +++ b/pyslurm/db/qos.pyx @@ -157,7 +157,7 @@ cdef class QualityOfService: return wrap def __repr__(self): - return f'{self.__class__.__name__}({self.name})' + return f'pyslurm.db.{self.__class__.__name__}({self.name})' def to_dict(self): """Database QualityOfService information formatted as a dictionary. 
diff --git a/pyslurm/db/step.pyx b/pyslurm/db/step.pyx index e39af066..ee809f01 100644 --- a/pyslurm/db/step.pyx +++ b/pyslurm/db/step.pyx @@ -37,6 +37,13 @@ from pyslurm.utils.helpers import ( from pyslurm.core.job.util import cpu_freq_int_to_str +cdef class JobSteps(dict): + + def __repr__(self): + data = super().__repr__() + return f'pyslurm.db.{self.__class__.__name__}({data})' + + cdef class JobStep: def __cinit__(self): @@ -67,6 +74,9 @@ cdef class JobStep: out["stats"] = self.stats.to_dict() return out + def __repr__(self): + return f'pyslurm.db.{self.__class__.__name__}({self.id})' + @property def num_nodes(self): nnodes = u32_parse(self.ptr.nnodes) diff --git a/pyslurm/xcollections.pxd b/pyslurm/xcollections.pxd index 24007da7..98dfa713 100644 --- a/pyslurm/xcollections.pxd +++ b/pyslurm/xcollections.pxd @@ -35,25 +35,26 @@ cdef class MultiClusterMap: This class enables collections to hold data from multiple Clusters if applicable. For quite a few Entities in Slurm it is possible to gather data from - multiple Clusters. For example, with `squeue`, you can easily list Jobs + multiple Clusters. For example, with `sacct`, you can easily query Jobs running on different Clusters - provided your Cluster is joined in a Federation or simply part of a multi Cluster Setup. - Collections like `pyslurm.Jobs` inherit from this Class to enable holding - such data from multiple Clusters. - Internally, the data is structured in a `dict` like this (with - `pyslurm.Jobs` as an example): + Collections like [pyslurm.db.Jobs][] inherit from this Class to enable + holding such data from multiple Clusters. Internally, the data is + structured in a `dict` like this (with [pyslurm.db.Jobs][] as an example): ```python data = { - "LOCAL_CLUSTER": - 1: pyslurm.Job, - 2: pyslurm.Job, + "LOCAL_CLUSTER": { + 1: pyslurm.db.Job(1), + 2: pyslurm.db.Job(2), ... 
- "OTHER_REMOTE_CLUSTER": - 100: pyslurm.Job, - 101, pyslurm.Job + }, + "OTHER_REMOTE_CLUSTER": { + 100: pyslurm.db.Job(100), + 101, pyslurm.db.Job(101) ... + }, ... } ``` @@ -70,15 +71,18 @@ cdef class MultiClusterMap: job = data[1] ``` - `job` would then hold the instance for Job 1 from the `LOCAL_CLUSTER` - data. + `job` would then hold the instance for `pyslurm.db.Job(1)` from the + `LOCAL_CLUSTER` data. + Alternatively, data can also be accessed like this: ```python job = data["OTHER_REMOTE_CLUSTER"][100] ``` - Here, you are directly specifying which Cluster data you want to access. + Here, you are directly specifying which Cluster data you want to access, + and you will get the instance for `pyslurm.db.Job(100)` from the + `OTHER_REMOTE_CLUSTER` data. Similarly, every method (where applicable) from a standard dict is extended with multi-cluster functionality (check out the examples on the diff --git a/pyslurm/xcollections.pyx b/pyslurm/xcollections.pyx index 8be67d29..b483cb40 100644 --- a/pyslurm/xcollections.pyx +++ b/pyslurm/xcollections.pyx @@ -251,6 +251,26 @@ cdef class MultiClusterMap: def _item_id(self, item): return self._id_attr.__get__(item) + def _iter_clusters_dict(self, other): + for key in other: + try: + iterator = iter(other[key]) + except TypeError as e: + try: + cluster = self._get_cluster() + except KeyError: + cluster = LOCAL_CLUSTER + + if not cluster in self.data: + self.data[cluster] = {} + yield (cluster, other) + break + else: + cluster = key + if not cluster in self.data: + self.data[cluster] = {} + yield (cluster, other[cluster]) + def __eq__(self, other): if isinstance(other, self.__class__): return self.data == other.data @@ -281,7 +301,8 @@ cdef class MultiClusterMap: return sum(len(data) for data in self.data.values()) def __repr__(self): - return f'{self._typ}([{", ".join(map(repr, self.values()))}])' + data = ", ".join(map(repr, self.data.values())) + return f'pyslurm.{self._typ}({data})' def __contains__(self, item): if 
isinstance(item, self._val_type): @@ -309,6 +330,44 @@ cdef class MultiClusterMap: def __copy__(self): return self.copy() + def __or__(self, other): + if isinstance(other, MultiClusterMap): + if isinstance(self, dict): + return NotImplemented + + out = self.copy() + out |= other + return out + elif isinstance(other, dict): + out = self.copy() + for cluster, data in self._iter_clusters_dict(other): + out.data[cluster] = self.data[cluster] | data + return out + return NotImplemented + + def __ror__(self, other): + if isinstance(other, MultiClusterMap): + out = other.copy() + out |= self + return out + elif isinstance(other, dict): + out = self.copy() + for cluster, data in self._iter_clusters_dict(other): + out.data[cluster] = data | self.data[cluster] + return out + return NotImplemented + + def __ior__(self, other): + if isinstance(other, MultiClusterMap): + for cluster in other.clusters(): + if not cluster in self.data: + self.data[cluster] = {} + self.data[cluster] |= other.data[cluster] + else: + for cluster, data in self._iter_clusters_dict(other): + self.data[cluster] |= data + return self + def copy(self): """Return a Copy of this instance.""" out = self.__class__.__new__(self.__class__) @@ -353,9 +412,9 @@ cdef class MultiClusterMap: """An Item to add to the collection Note that a collection can only hold its specific type. - For example, a collection of `pyslurm.Jobs` can only hold - `pyslurm.Job` objects. Trying to add anything other than the accepted - type will raise a TypeError. + For example, a collection of [pyslurm.db.Jobs][] can only hold + [pyslurm.db.Job][] objects. Trying to add anything other than the + accepted type will raise a TypeError. Args: item (Any): @@ -366,13 +425,15 @@ cdef class MultiClusterMap: the collection was added. Examples: - Add a `pyslurm.Job` instance to the `Jobs` collection. 
- - >>> data = pyslurm.Jobs() - >>> job = pyslurm.Job(1) - >>> data.add(job) - >>> print(data) - Jobs([Job(1)]) + Add a `pyslurm.db.Job` instance to the `pyslurm.db.Jobs` + collection. + + >>> import pyslurm + >>> jobs = pyslurm.db.Jobs() + >>> job = pyslurm.db.Job(1) + >>> jobs.add(job) + >>> print(jobs) + pyslurm.db.Jobs({1: pyslurm.db.Job(1)}) """ if item.cluster not in self.data: self.data[item.cluster] = {} @@ -500,37 +561,16 @@ cdef class MultiClusterMap: return item - def _update(self, data): - for key in data: - try: - iterator = iter(data[key]) - except TypeError as e: - cluster = self._get_cluster() - if not cluster in self.data: - self.data[cluster] = {} - self.data[cluster].update(data) - break - else: - cluster = key - if not cluster in self.data: - self.data[cluster] = {} - self.data[cluster].update(data[cluster]) -# col = data[cluster] -# if hasattr(col, "keys") and callable(col.keys): -# for k in col.keys(): - -# else: -# for item in col: -# k, v = item - - def update(self, data={}, **kwargs): """Update the collection. This functions like `dict`'s `update` method. 
""" - self._update(data) - self._update(kwargs) + for cluster, data in self._iter_clusters_dict(data): + self.data[cluster].update(data) + + for cluster, data in self._iter_clusters_dict(kwargs): + self.data[cluster].update(data) def multi_reload(cur, frozen=True): diff --git a/tests/unit/test_collection.py b/tests/unit/test_collection.py index ccb27779..a29e4f86 100644 --- a/tests/unit/test_collection.py +++ b/tests/unit/test_collection.py @@ -37,8 +37,8 @@ def _create_collection(self): 2: pyslurm.db.Job(2), }, OTHER_CLUSTER: { - 1: pyslurm.db.Job(1, cluster="other_cluster"), - 10: pyslurm.db.Job(10, cluster="other_cluster"), + 1: pyslurm.db.Job(1, cluster=OTHER_CLUSTER), + 10: pyslurm.db.Job(10, cluster=OTHER_CLUSTER), } } col = pyslurm.db.Jobs() @@ -326,3 +326,73 @@ def cpus(self): expected = 0 assert sum_property(object_dict, TestObject.cpus) == expected + + def test_ior(self): + col = self._create_collection() + col_len = len(col) + + other_data = { + LOCAL_CLUSTER: { + 3: pyslurm.db.Job(3), + 2: pyslurm.db.Job(2), + }, + "test_cluster": { + 1000: pyslurm.db.Job(1000, cluster="test_cluster"), + 1001: pyslurm.db.Job(1001, cluster="test_cluster"), + } + } + other_col = pyslurm.db.Jobs() + other_col.update(other_data) + + col |= other_col + assert isinstance(col, pyslurm.xcollections.MultiClusterMap) + assert isinstance(col, pyslurm.db.Jobs) + assert len(col.clusters()) == 3 + assert len(col) == col_len+3 + + dict_data = { + 10: pyslurm.db.Job(10), + 11: pyslurm.db.Job(11), + } + + col |= dict_data + assert isinstance(col, pyslurm.xcollections.MultiClusterMap) + assert isinstance(col, pyslurm.db.Jobs) + assert len(col.clusters()) == 3 + assert len(col[LOCAL_CLUSTER]) == 5 + assert len(col) == col_len+5 + + def test_or(self): + col = self._create_collection() + col_len = len(col) + + other_data = { + LOCAL_CLUSTER: { + 3: pyslurm.db.Job(3), + 2: pyslurm.db.Job(2), + }, + "test_cluster": { + 1000: pyslurm.db.Job(1000, cluster="test_cluster"), + 1001: 
pyslurm.db.Job(1001, cluster="test_cluster"), + } + } + other_col = pyslurm.db.Jobs() + other_col.update(other_data) + + _col = col | other_col + assert isinstance(_col, pyslurm.xcollections.MultiClusterMap) + assert isinstance(_col, pyslurm.db.Jobs) + assert len(_col.clusters()) == 3 + assert len(_col) == col_len+3 + + dict_data = { + 10: pyslurm.db.Job(10), + 11: pyslurm.db.Job(11), + } + + _col = _col | dict_data + assert isinstance(_col, pyslurm.xcollections.MultiClusterMap) + assert isinstance(_col, pyslurm.db.Jobs) + assert len(_col.clusters()) == 3 + assert len(_col[LOCAL_CLUSTER]) == 5 + assert len(_col) == col_len+5 From 0e4a327bee7a0eb57d57c7f1bf5765d5be4d9198 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Tue, 18 Jul 2023 21:09:20 +0200 Subject: [PATCH 31/48] Update CHANGELOG (#307) --- CHANGELOG.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c50aa1b5..aad36508 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased on the [23.2.x](https://github.com/PySlurm/pyslurm/tree/23.2.x) branch +- New Classes to interact with Database Associations (WIP) + - `pyslurm.db.Association` + - `pyslurm.db.Associations` +- New Classes to interact with Database QoS (WIP) + - `pyslurm.db.QualityOfService` + - `pyslurm.db.QualitiesOfService` + +## [23.2.2](https://github.com/PySlurm/pyslurm/releases/tag/v23.2.2) - 2023-07-18 + ### Added - Ability to modify Database Jobs From a0dfa6b54b37d1f5352f354dffc015c6bb2d9377 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Tue, 18 Jul 2023 22:57:30 +0200 Subject: [PATCH 32/48] Fix recursively transforming collections to JSON (#308) - and add testing for it --- pyslurm/core/job/job.pyx | 4 ++- pyslurm/core/job/step.pyx | 11 +++++++- pyslurm/db/job.pyx | 7 ++--- pyslurm/db/step.pyx | 4 +++ 
pyslurm/xcollections.pyx | 3 ++ tests/integration/test_db_job.py | 15 ++++++++++ tests/integration/test_job.py | 44 ++++++++++++++++++++++++----- tests/integration/test_job_steps.py | 16 +++++------ tests/integration/test_node.py | 18 +++++++----- tests/integration/test_partition.py | 11 ++++++++ 10 files changed, 104 insertions(+), 29 deletions(-) diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index 8ccc7f66..6e8adcbb 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -295,7 +295,9 @@ cdef class Job: Returns: (dict): Job information as dict """ - return instance_to_dict(self) + cdef dict out = instance_to_dict(self) + out["steps"] = self.steps.to_dict() + return out def send_signal(self, signal, steps="children", hurry=False): """Send a signal to a running Job. diff --git a/pyslurm/core/job/step.pyx b/pyslurm/core/job/step.pyx index 4227e901..18c7a6f5 100644 --- a/pyslurm/core/job/step.pyx +++ b/pyslurm/core/job/step.pyx @@ -143,6 +143,9 @@ cdef class JobSteps(dict): cdef JobSteps steps = JobSteps() return steps._load_data(slurm.NO_VAL, slurm.SHOW_ALL) + def to_dict(self): + return xcollections.dict_recursive(self) + cdef class JobStep: @@ -329,7 +332,13 @@ cdef class JobStep: Returns: (dict): JobStep information as dict """ - return instance_to_dict(self) + cdef dict out = instance_to_dict(self) + + dist = self.distribution + if dist: + out["distribution"] = dist.to_dict() + + return out @property def id(self): diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index 6679a77c..beea1861 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -524,11 +524,8 @@ cdef class Job: if self.stats: out["stats"] = self.stats.to_dict() - - steps = out.pop("steps", {}) - out["steps"] = {} - for step_id, step in steps.items(): - out["steps"][step_id] = step.to_dict() + if self.steps: + out["steps"] = self.steps.to_dict() return out diff --git a/pyslurm/db/step.pyx b/pyslurm/db/step.pyx index ee809f01..2d71ca73 100644 --- 
a/pyslurm/db/step.pyx +++ b/pyslurm/db/step.pyx @@ -27,6 +27,7 @@ from pyslurm.core.error import RPCError from typing import Union from pyslurm.utils.uint import * from pyslurm.utils.ctime import _raw_time +from pyslurm import xcollections from pyslurm.utils.helpers import ( gid_to_name, uid_to_name, @@ -43,6 +44,9 @@ cdef class JobSteps(dict): data = super().__repr__() return f'pyslurm.db.{self.__class__.__name__}({data})' + def to_dict(self): + return xcollections.dict_recursive(self) + cdef class JobStep: diff --git a/pyslurm/xcollections.pyx b/pyslurm/xcollections.pyx index b483cb40..a0ce2e6b 100644 --- a/pyslurm/xcollections.pyx +++ b/pyslurm/xcollections.pyx @@ -447,6 +447,9 @@ cdef class MultiClusterMap: Returns: (str): JSON formatted string from `json.dumps()` """ + if not self.data: + return '{}' + data = multi_dict_recursive(self) if multi_cluster: return json.dumps(data) diff --git a/tests/integration/test_db_job.py b/tests/integration/test_db_job.py index 1ea59690..310df51f 100644 --- a/tests/integration/test_db_job.py +++ b/tests/integration/test_db_job.py @@ -24,6 +24,7 @@ import pyslurm import time import util +import json # TODO: Instead of submitting new Jobs and waiting to test Database API @@ -55,6 +56,20 @@ def test_parse_all(submit_job): assert job_dict["steps"] +def test_to_json(submit_job): + job = submit_job() + util.wait() + + jfilter = pyslurm.db.JobFilter(ids=[job.id]) + jobs = pyslurm.db.Jobs.load(jfilter) + + json_data = jobs.to_json() + dict_data = json.loads(json_data) + assert dict_data + assert json_data + assert len(dict_data) == 1 + + def test_modify(submit_job): job = submit_job() util.wait(5) diff --git a/tests/integration/test_job.py b/tests/integration/test_job.py index 9788af45..8c9d4750 100644 --- a/tests/integration/test_job.py +++ b/tests/integration/test_job.py @@ -23,6 +23,7 @@ import time import pytest import pyslurm +import json import util from util import create_simple_job_desc from pyslurm import ( @@ -64,34 +65,34 
@@ def test_cancel(submit_job): job = submit_job() job.cancel() # make sure the job is actually cancelled - time.sleep(util.WAIT_SECS_SLURMCTLD) + util.wait() assert Job.load(job.id).state == "CANCELLED" def test_send_signal(submit_job): job = submit_job() - time.sleep(util.WAIT_SECS_SLURMCTLD) + util.wait() assert Job.load(job.id).state == "RUNNING" # Send a SIGKILL (basically cancelling the Job) job.send_signal(9) # make sure the job is actually cancelled - time.sleep(util.WAIT_SECS_SLURMCTLD) + util.wait() assert Job.load(job.id).state == "CANCELLED" def test_suspend_unsuspend(submit_job): job = submit_job() - time.sleep(util.WAIT_SECS_SLURMCTLD) + util.wait() job.suspend() assert Job.load(job.id).state == "SUSPENDED" job.unsuspend() # make sure the job is actually running again - time.sleep(util.WAIT_SECS_SLURMCTLD) + util.wait() assert Job.load(job.id).state == "RUNNING" @@ -121,7 +122,7 @@ def test_requeue(submit_job): assert job.requeue_count == 0 - time.sleep(util.WAIT_SECS_SLURMCTLD) + util.wait() job.requeue() job = Job.load(job.id) @@ -130,7 +131,7 @@ def test_requeue(submit_job): def test_notify(submit_job): job = submit_job() - time.sleep(util.WAIT_SECS_SLURMCTLD) + util.wait() # Could check the logfile, but we just assume for now # that when this function raises no Exception, everything worked. 
@@ -155,6 +156,35 @@ def test_get_job_queue(submit_job): assert isinstance(jobs[job.id], Job) +def test_load_steps(submit_job): + job_list = [submit_job() for i in range(3)] + util.wait() + + jobs = Jobs.load() + jobs.load_steps() + + for _job in job_list: + job = jobs[_job.id] + assert job.state == "RUNNING" + assert job.steps + assert isinstance(job.steps, pyslurm.JobSteps) + assert job.steps.get("batch") + + +def test_to_json(submit_job): + job_list = [submit_job() for i in range(3)] + util.wait() + + jobs = Jobs.load() + jobs.load_steps() + + json_data = jobs.to_json() + dict_data = json.loads(json_data) + assert dict_data + assert json_data + assert len(dict_data) >= 3 + + def test_get_resource_layout_per_node(submit_job): # TODO assert True diff --git a/tests/integration/test_job_steps.py b/tests/integration/test_job_steps.py index 8d13ba9f..e61f9ad1 100644 --- a/tests/integration/test_job_steps.py +++ b/tests/integration/test_job_steps.py @@ -57,7 +57,7 @@ def test_load(submit_job): # Load the step info, waiting one second to make sure the Step # actually exists. 
- time.sleep(util.WAIT_SECS_SLURMCTLD) + util.wait() step = JobStep.load(job.id, "batch") assert step.id == "batch" @@ -101,7 +101,7 @@ def test_load(submit_job): def test_collection(submit_job): job = submit_job(script=create_job_script_multi_step()) - time.sleep(util.WAIT_SECS_SLURMCTLD) + util.wait() steps = JobSteps.load(job) assert steps @@ -115,7 +115,7 @@ def test_collection(submit_job): def test_cancel(submit_job): job = submit_job(script=create_job_script_multi_step()) - time.sleep(util.WAIT_SECS_SLURMCTLD) + util.wait() steps = JobSteps.load(job) assert len(steps) == 3 assert ("batch" in steps and @@ -124,7 +124,7 @@ def test_cancel(submit_job): steps[0].cancel() - time.sleep(util.WAIT_SECS_SLURMCTLD) + util.wait() steps = JobSteps.load(job) assert len(steps) == 2 assert ("batch" in steps and @@ -135,7 +135,7 @@ def test_modify(submit_job): steps = "srun -t 20 sleep 100" job = submit_job(script=create_job_script_multi_step(steps)) - time.sleep(util.WAIT_SECS_SLURMCTLD) + util.wait() step = JobStep.load(job, 0) assert step.time_limit == 20 @@ -150,7 +150,7 @@ def test_send_signal(submit_job): steps = "srun -t 10 sleep 100" job = submit_job(script=create_job_script_multi_step(steps)) - time.sleep(util.WAIT_SECS_SLURMCTLD) + util.wait() step = JobStep.load(job, 0) assert step.state == "RUNNING" @@ -159,7 +159,7 @@ def test_send_signal(submit_job): # Make sure the job is actually cancelled. # If a RPCError is raised, this means the Step got cancelled. 
- time.sleep(util.WAIT_SECS_SLURMCTLD) + util.wait() with pytest.raises(RPCError): step = JobStep.load(job, 0) @@ -173,5 +173,5 @@ def test_load_with_wrong_step_id(submit_job): def test_parse_all(submit_job): job = submit_job() - time.sleep(util.WAIT_SECS_SLURMCTLD) + util.wait() JobStep.load(job, "batch").to_dict() diff --git a/tests/integration/test_node.py b/tests/integration/test_node.py index a1c9f6b6..94ede1e5 100644 --- a/tests/integration/test_node.py +++ b/tests/integration/test_node.py @@ -20,11 +20,9 @@ # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. """test_node.py - Test the node api functions.""" -import sys -import time import pytest import pyslurm -import os +import json from pyslurm import Node, Nodes, RPCError @@ -51,10 +49,6 @@ def test_create(): Node("testhostpyslurm2").create("idle") -# def test_delete(): -# node = Node("testhost1").delete() - - def test_modify(): _, node = Nodes.load().popitem() @@ -71,3 +65,13 @@ def test_modify(): def test_parse_all(): _, node = Nodes.load().popitem() assert node.to_dict() + + +def test_to_json(): + nodes = Nodes.load() + json_data = nodes.to_json() + dict_data = json.loads(json_data) + + assert dict_data + assert len(dict_data) >= 1 + assert json_data diff --git a/tests/integration/test_partition.py b/tests/integration/test_partition.py index 712eeaff..30c54f92 100644 --- a/tests/integration/test_partition.py +++ b/tests/integration/test_partition.py @@ -23,6 +23,7 @@ import pytest import pyslurm +import json import util from pyslurm import Partition, Partitions, RPCError @@ -72,6 +73,16 @@ def test_parse_all(): assert part.to_dict() +def test_to_json(): + parts = Partitions.load() + json_data = parts.to_json() + dict_data = json.loads(json_data) + + assert dict_data + assert len(dict_data) >= 1 + assert json_data + + def test_reload(): _partnames = [util.randstr() for i in range(3)] _tmp_parts = Partitions(_partnames) From 2810957984d3435bb1084361df4a4833c4ea9f7f Mon Sep 17 00:00:00 2001 
From: tazend <75485188+tazend@users.noreply.github.com> Date: Tue, 18 Jul 2023 23:13:08 +0200 Subject: [PATCH 33/48] Bump version to 23.2.2 (#309) --- pyslurm/__version__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyslurm/__version__.py b/pyslurm/__version__.py index a3011f6e..96711e72 100644 --- a/pyslurm/__version__.py +++ b/pyslurm/__version__.py @@ -1 +1 @@ -__version__ = "23.2.1" +__version__ = "23.2.2" diff --git a/setup.py b/setup.py index 98032f6c..afd8a2eb 100644 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ CYTHON_VERSION_MIN = "0.29.30" SLURM_RELEASE = "23.2" -PYSLURM_PATCH_RELEASE = "1" +PYSLURM_PATCH_RELEASE = "2" SLURM_SHARED_LIB = "libslurm.so" CURRENT_DIR = pathlib.Path(__file__).parent From b435d688b8e91686ab3277800fd7b843e993be6c Mon Sep 17 00:00:00 2001 From: robgics <32717310+robgics@users.noreply.github.com> Date: Thu, 24 Aug 2023 11:24:27 -0400 Subject: [PATCH 34/48] Fix a reference to an invalid property and return the correct value from the calculation. 
(#313) --- pyslurm/core/job/job.pyx | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index 6e8adcbb..e8e65d58 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -1093,10 +1093,11 @@ cdef class Job: mem_node = self.memory_per_node if mem_node is not None: - num_nodes = self.min_nodes + num_nodes = self.num_nodes if num_nodes is not None: mem_node *= num_nodes - return mem_cpu + return mem_node + # TODO # mem_gpu = self.memory_per_gpu From 4ecdfa047d09c5364a4101f2153a899b3f084875 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Sat, 26 Aug 2023 18:29:39 +0200 Subject: [PATCH 35/48] Nodes: Some Bugfixes and new attributes (#318) * Nodes: Some Bugfixes and new attributes - add idle_memory and allocated_tres properties - correctly return GRES in allocated_gres property - trailing whitespace fixes - add some new tests for GRES parsing --- pyslurm/core/node.pxd | 14 ++++++- pyslurm/core/node.pyx | 83 ++++++++++++++++++++++++++------------- pyslurm/utils/cstr.pyx | 21 +++++----- pyslurm/utils/helpers.pxd | 1 + pyslurm/utils/helpers.pyx | 15 +++++-- tests/unit/test_common.py | 64 ++++++++++++++++++------------ 6 files changed, 131 insertions(+), 67 deletions(-) diff --git a/pyslurm/core/node.pxd b/pyslurm/core/node.pxd index d889b723..d5e87da4 100644 --- a/pyslurm/core/node.pxd +++ b/pyslurm/core/node.pxd @@ -68,8 +68,12 @@ cdef class Nodes(MultiClusterMap): Attributes: free_memory (int): Amount of free memory in this node collection. (in Mebibytes) + Note that this means actual free memory as returned by the `free` + command real_memory (int): Amount of real memory in this node collection. (in Mebibytes) + idle_memory (int): + Amount of idle memory in this node collection. (in Mebibytes) allocated_memory (int): Amount of alloc Memory in this node collection. 
(in Mebibytes) total_cpus (int): @@ -100,7 +104,7 @@ cdef class Node: Other Parameters: configured_gres (dict): - Configured GRES for the node + Configured GRES for the node address (str): Address of the node hostname (str): @@ -160,6 +164,10 @@ cdef class Node: Real Memory in Mebibytes configured for this node. free_memory (int): Free Memory in Mebibytes on the node. + Note that this means actual free memory as returned by the `free` + command + idle_memory (int): + Idle Memory in Mebibytes on the node. memory_reserved_for_system (int): Memory in Mebibytes reserved for the System not usable by Jobs. temporary_disk (int): @@ -194,6 +202,8 @@ cdef class Node: Time this node was last busy, as unix timestamp. reason_time (int): Time the reason was set for the node, as unix timestamp. + allocated_tres (dict): + Currently allocated Trackable Resources allocated_cpus (int): Number of allocated CPUs on the node. idle_cpus (int): @@ -235,4 +245,4 @@ cdef class Node: @staticmethod cdef Node from_ptr(node_info_t *in_ptr) - + diff --git a/pyslurm/core/node.pyx b/pyslurm/core/node.pyx index 5f8c7e2d..bf5676a4 100644 --- a/pyslurm/core/node.pyx +++ b/pyslurm/core/node.pyx @@ -33,13 +33,14 @@ from pyslurm import xcollections from pyslurm.utils.helpers import ( uid_to_name, gid_to_name, - humanize, + humanize, _getgrall_to_dict, _getpwall_to_dict, cpubind_to_num, instance_to_dict, nodelist_from_range_str, nodelist_to_range_str, + gres_from_tres_dict, ) @@ -65,7 +66,7 @@ cdef class Nodes(MultiClusterMap): """Load all nodes in the system. Args: - preload_passwd_info (bool): + preload_passwd_info (bool): Decides whether to query passwd and groups information from the system. 
Could potentially speed up access to attributes of the Node @@ -83,7 +84,7 @@ cdef class Nodes(MultiClusterMap): dict passwd = {} dict groups = {} Nodes nodes = Nodes() - int flags = slurm.SHOW_ALL + int flags = slurm.SHOW_ALL | slurm.SHOW_DETAIL Node node verify_rpc(slurm_load_node(0, &nodes.info, flags)) @@ -107,6 +108,12 @@ cdef class Nodes(MultiClusterMap): # is raised by replacing it with a zeroed-out node_info_t. nodes.info.node_array[cnt] = nodes.tmp_info + name = node.name + if not name: + # Could be possible if there are nodes configured in + # slurm.conf that cannot be reached anymore. + continue + if preload_passwd_info: node.passwd = passwd node.groups = groups @@ -114,7 +121,7 @@ cdef class Nodes(MultiClusterMap): cluster = node.cluster if cluster not in nodes.data: nodes.data[cluster] = {} - nodes.data[cluster][node.name] = node + nodes.data[cluster][name] = node # We have extracted all pointers nodes.info.record_count = 0 @@ -162,7 +169,7 @@ cdef class Nodes(MultiClusterMap): n._alloc_umsg() cstr.fmalloc(&n.umsg.node_names, node_str) verify_rpc(slurm_update_node(n.umsg)) - + @property def free_memory(self): return xcollections.sum_property(self, Node.free_memory) @@ -171,6 +178,10 @@ cdef class Nodes(MultiClusterMap): def real_memory(self): return xcollections.sum_property(self, Node.real_memory) + @property + def idle_memory(self): + return xcollections.sum_property(self, Node.idle_memory) + @property def allocated_memory(self): return xcollections.sum_property(self, Node.allocated_memory) @@ -186,7 +197,7 @@ cdef class Nodes(MultiClusterMap): @property def allocated_cpus(self): return xcollections.sum_property(self, Node.allocated_cpus) - + @property def effective_cpus(self): return xcollections.sum_property(self, Node.effective_cpus) @@ -237,7 +248,7 @@ cdef class Node: xfree(self.info) def __dealloc__(self): - self._dealloc_impl() + self._dealloc_impl() def __setattr__(self, name, val): # When a user wants to set attributes on a Node instance 
that was @@ -264,7 +275,7 @@ cdef class Node: cdef _swap_data(Node dst, Node src): cdef node_info_t *tmp = NULL if dst.info and src.info: - tmp = dst.info + tmp = dst.info dst.info = src.info src.info = tmp @@ -319,7 +330,7 @@ cdef class Node: Implements the slurm_create_node RPC. Args: - state (str, optional): + state (str, optional): An optional state the created Node should have. Allowed values are `future` and `cloud`. `future` is the default. @@ -421,7 +432,7 @@ cdef class Node: @configured_gres.setter def configured_gres(self, val): - cstr.fmalloc2(&self.info.gres, &self.umsg.gres, + cstr.fmalloc2(&self.info.gres, &self.umsg.gres, cstr.from_gres_dict(val)) @property @@ -451,7 +462,7 @@ cdef class Node: @extra.setter def extra(self, val): cstr.fmalloc2(&self.info.extra, &self.umsg.extra, val) - + @property def reason(self): return cstr.to_unicode(self.info.reason) @@ -486,7 +497,7 @@ cdef class Node: @property def allocated_gres(self): - return cstr.to_gres_dict(self.info.gres_used) + return gres_from_tres_dict(self.allocated_tres) @property def mcs_label(self): @@ -511,6 +522,11 @@ cdef class Node: def free_memory(self): return u64_parse(self.info.free_mem) + @property + def idle_memory(self): + real = self.real_memory + return 0 if not real else real - self.allocated_memory + @property def memory_reserved_for_system(self): return u64_parse(self.info.mem_spec_limit) @@ -596,17 +612,17 @@ cdef class Node: # """dict: TRES that are configured on the node.""" # return cstr.to_dict(self.info.tres_fmt_str) -# @property -# def tres_alloc(self): -# cdef char *alloc_tres = NULL -# if self.info.select_nodeinfo: -# slurm_get_select_nodeinfo( -# self.info.select_nodeinfo, -# slurm.SELECT_NODEDATA_TRES_ALLOC_FMT_STR, -# slurm.NODE_STATE_ALLOCATED, -# &alloc_tres -# ) -# return cstr.to_gres_dict(alloc_tres) + @property + def allocated_tres(self): + cdef char *alloc_tres = NULL + if self.info.select_nodeinfo: + slurm_get_select_nodeinfo( + self.info.select_nodeinfo, + 
slurm.SELECT_NODEDATA_TRES_ALLOC_FMT_STR, + slurm.NODE_STATE_ALLOCATED, + &alloc_tres + ) + return cstr.to_dict(alloc_tres) @property def allocated_cpus(self): @@ -671,10 +687,22 @@ cdef class Node: "temperature": u32_parse(self.info.ext_sensors.temperature) } + @property + def _node_state(self): + idle_cpus = self.idle_cpus + state = self.info.node_state + + if idle_cpus and idle_cpus != self.effective_cpus: + # If we aren't idle but also not allocated, then set state to + # MIXED. + state &= slurm.NODE_STATE_FLAGS + state |= slurm.NODE_STATE_MIXED + + return state + @property def state(self): - cdef char* state = slurm_node_state_string_complete( - self.info.node_state) + cdef char* state = slurm_node_state_string_complete(self._node_state) state_str = cstr.to_unicode(state) xfree(state) return state_str @@ -685,9 +713,10 @@ cdef class Node: @property def next_state(self): + state = self._node_state if ((self.info.next_state != slurm.NO_VAL) - and (self.info.node_state & slurm.NODE_STATE_REBOOT_REQUESTED - or self.info.node_state & slurm.NODE_STATE_REBOOT_ISSUED)): + and (state & slurm.NODE_STATE_REBOOT_REQUESTED + or state & slurm.NODE_STATE_REBOOT_ISSUED)): return cstr.to_unicode( slurm_node_state_string(self.info.next_state)) else: diff --git a/pyslurm/utils/cstr.pyx b/pyslurm/utils/cstr.pyx index 13795544..0b8aa2a8 100644 --- a/pyslurm/utils/cstr.pyx +++ b/pyslurm/utils/cstr.pyx @@ -133,7 +133,7 @@ cpdef dict to_dict(char *str_dict, str delim1=",", str delim2="="): which can easily be converted to a dict. 
""" cdef: - str _str_dict = to_unicode(str_dict) + str _str_dict = to_unicode(str_dict) str key, val dict out = {} @@ -143,7 +143,7 @@ cpdef dict to_dict(char *str_dict, str delim1=",", str delim2="="): for kv in _str_dict.split(delim1): if delim2 in kv: key, val = kv.split(delim2, 1) - out[key] = val + out[key] = int(val) if val.isdigit() else val return out @@ -184,10 +184,10 @@ def dict_to_str(vals, prepend=None, delim1=",", delim2="="): if isinstance(vals, str): tmp_dict = validate_str_key_value_format(vals, delim1, delim2) - + for k, v in tmp_dict.items(): if ((delim1 in str(k) or delim2 in str(k)) or - delim1 in str(v) or delim2 in str(v)): + delim1 in str(v) or delim2 in str(v)): raise ValueError( f"Key or Value cannot contain either {delim1} or {delim2}. " f"Got Key: {k} and Value: {v}." @@ -208,6 +208,7 @@ cpdef dict to_gres_dict(char *gres): cdef: dict output = {} str gres_str = to_unicode(gres) + str gres_delim = "gres:" if not gres_str or gres_str == "(null)": return {} @@ -215,15 +216,15 @@ cpdef dict to_gres_dict(char *gres): for item in re.split(",(?=[^,]+?:)", gres_str): # Remove the additional "gres" specifier if it exists - if "gres:" in item: - item = item.replace("gres:", "") + if gres_delim in item: + item = item.replace(gres_delim, "") gres_splitted = re.split( - ":(?=[^:]+?)", + ":(?=[^:]+?)", item.replace("(", ":", 1).replace(")", "") ) - name, typ, cnt = gres_splitted[0], gres_splitted[1], 0 + name, typ, cnt = gres_splitted[0], gres_splitted[1], 0 # Check if we have a gres type. 
if typ.isdigit(): @@ -243,10 +244,10 @@ cpdef dict to_gres_dict(char *gres): # Cover cases with IDX idx = gres_splitted[3] if not typ else gres_splitted[4] output[name_and_typ] = { - "count": cnt, + "count": int(cnt), "indexes": idx, } - + return output diff --git a/pyslurm/utils/helpers.pxd b/pyslurm/utils/helpers.pxd index 5de4cf99..3f73c375 100644 --- a/pyslurm/utils/helpers.pxd +++ b/pyslurm/utils/helpers.pxd @@ -30,3 +30,4 @@ from libc.stdlib cimport free cpdef uid_to_name(uint32_t uid, err_on_invalid=*, dict lookup=*) cpdef gid_to_name(uint32_t gid, err_on_invalid=*, dict lookup=*) +cpdef gres_from_tres_dict(dict tres_dict) diff --git a/pyslurm/utils/helpers.pyx b/pyslurm/utils/helpers.pyx index 9fcd5896..4d5f6d0c 100644 --- a/pyslurm/utils/helpers.pyx +++ b/pyslurm/utils/helpers.pyx @@ -97,7 +97,7 @@ def user_to_uid(user, err_on_invalid=True): try: if isinstance(user, str) and not user.isdigit(): return getpwnam(user).pw_uid - + return getpwuid(int(user)).pw_uid except KeyError as e: if err_on_invalid: @@ -208,7 +208,7 @@ def nodelist_to_range_str(nodelist): char *nl = nodelist slurm.hostlist_t hl char *hl_ranged = NULL - + hl = slurm.slurm_hostlist_create(nl) if not hl: return None @@ -219,7 +219,7 @@ def nodelist_to_range_str(nodelist): free(hl_ranged) slurm.slurm_hostlist_destroy(hl) - return out + return out def humanize(num, decimals=1): @@ -378,3 +378,12 @@ def dehumanize_step_id(sid): return slurm.SLURM_PENDING_STEP else: return int(sid) + + +cpdef gres_from_tres_dict(dict tres_dict): + gres_prefix = "gres/" + return { + k.replace(gres_prefix, ""):v + for k, v in tres_dict.items() + if gres_prefix in k + } diff --git a/tests/unit/test_common.py b/tests/unit/test_common.py index cf5353b1..4706130f 100644 --- a/tests/unit/test_common.py +++ b/tests/unit/test_common.py @@ -55,6 +55,7 @@ nodelist_from_range_str, nodelist_to_range_str, instance_to_dict, + gres_from_tres_dict, ) from pyslurm.utils import cstr from pyslurm.xcollections import ( @@ -84,7 
+85,7 @@ def test_lists(self): n = Node() input_as_list = ["test1", "test2", "test3", "test4"] input_as_str = ",".join(input_as_list) - + n.available_features = input_as_list assert n.available_features == input_as_list @@ -101,9 +102,10 @@ def test_lists(self): assert n.available_features == [] def test_str_to_dict(self): - expected_dict = {"key1": "value1", "key2": "value2"} - input_str = "key1=value1,key2=value2" - assert cstr.to_dict(input_str) == expected_dict + expected_dict = {"cpu": 2, "mem": "11G", + "gres/gpu": 1, "gres/gpu:nvidia-a100": 1} + input_str = "cpu=2,mem=11G,gres/gpu=1,gres/gpu:nvidia-a100=1" + assert cstr.to_dict(input_str) == expected_dict assert cstr.to_dict("") == {} def test_dict_to_str(self): @@ -121,12 +123,12 @@ def test_dict_to_str(self): input_dict = {"key1=": "value1", "key2": "value2"} expected_str = "key1=value1,key2=value2" with pytest.raises(ValueError, - match=r"Key or Value cannot contain either*"): + match=r"Key or Value cannot contain either*"): assert cstr.dict_to_str(input_dict) == expected_str expected_str = "key1=value1,key2=value2" assert cstr.dict_to_str(expected_str) == expected_str - + assert cstr.dict_to_str({}) == None assert cstr.dict_to_str("") == None @@ -150,7 +152,19 @@ def test_dict_to_gres_str(self): assert cstr.from_gres_dict("tesla:3,a100:5", "gpu") == expected_str def test_str_to_gres_dict(self): - assert True + input_str = "gpu:nvidia-a100:1(IDX:0,1)" + expected = {"gpu:nvidia-a100":{"count": 1, "indexes": "0,1"}} + assert cstr.to_gres_dict(input_str) == expected + + input_str = "gpu:nvidia-a100:1" + expected = {"gpu:nvidia-a100": 1} + assert cstr.to_gres_dict(input_str) == expected + + def test_gres_from_tres_dict(self): + input_dict = {"cpu": 10, "mem": "5G", + "gres/gpu": 5, "gres/gpu:nvidia": 100} + expected = {"gpu": 5, "gpu:nvidia": 100} + assert gres_from_tres_dict(input_dict) == expected class TestUint: @@ -175,16 +189,16 @@ def _uint_impl(self, func_set, func_get, typ): assert func_get(val, 
zero_is_noval=False) == 0 with pytest.raises(TypeError, - match="an integer is required"): + match="an integer is required"): val = func_set("UNLIMITED") with pytest.raises(OverflowError, - match=r"can't convert negative value to*"): + match=r"can't convert negative value to*"): val = func_set(-1) with pytest.raises(OverflowError, match=r"value too large to convert to*|" - "Python int too large*"): + "Python int too large*"): val = func_set(2**typ) def test_u8(self): @@ -212,7 +226,7 @@ def test_set_parse_bool_flag(self): assert part.is_root_only assert not part.is_default assert not part.allow_root_jobs - + part.is_default = False part.is_hidden = False assert not part.is_hidden @@ -254,13 +268,13 @@ def test_parse_minutes(self): mins_str = "01:00:00" assert timestr_to_mins(mins_str) == mins - assert timestr_to_mins("UNLIMITED") == 2**32-1 + assert timestr_to_mins("UNLIMITED") == 2**32-1 assert timestr_to_mins(None) == 2**32-2 assert mins_to_timestr(mins) == mins_str assert mins_to_timestr(2**32-1) == "UNLIMITED" assert mins_to_timestr(2**32-2) == None - assert mins_to_timestr(0) == None + assert mins_to_timestr(0) == None with pytest.raises(ValueError, match="Invalid Time Specification: invalid_val."): @@ -271,13 +285,13 @@ def test_parse_seconds(self): secs_str = "01:00:00" assert timestr_to_secs(secs_str) == secs - assert timestr_to_secs("UNLIMITED") == 2**32-1 + assert timestr_to_secs("UNLIMITED") == 2**32-1 assert timestr_to_secs(None) == 2**32-2 assert secs_to_timestr(secs) == secs_str assert secs_to_timestr(2**32-1) == "UNLIMITED" assert secs_to_timestr(2**32-2) == None - assert secs_to_timestr(0) == None + assert secs_to_timestr(0) == None with pytest.raises(ValueError, match="Invalid Time Specification: invalid_val."): @@ -309,7 +323,7 @@ def test_parse_uid(self): assert name == "root" lookup = {0: "root"} - name = uid_to_name(0, lookup=lookup) + name = uid_to_name(0, lookup=lookup) assert name == "root" assert user_to_uid("root") == 0 @@ -327,7 +341,7 @@ 
def test_parse_gid(self): assert name == "root" lookup = {0: "root"} - name = gid_to_name(0, lookup=lookup) + name = gid_to_name(0, lookup=lookup) assert name == "root" assert group_to_gid("root") == 0 @@ -346,11 +360,11 @@ def test_expand_range_str(self): def test_humanize(self): val = humanize(1024) - assert val == "1.0G" + assert val == "1.0G" val = humanize(2**20) assert val == "1.0T" - + val = humanize(800) assert val == "800.0M" @@ -368,19 +382,19 @@ def test_dehumanize(self): val = dehumanize(1024) assert val == 1024 - val = dehumanize("2M") + val = dehumanize("2M") assert val == 2 - val = dehumanize("10G") + val = dehumanize("10G") assert val == 10240 - val = dehumanize("9.6G") + val = dehumanize("9.6G") assert val == round(1024*9.6) - val = dehumanize("10T") + val = dehumanize("10T") assert val == 10*(2**20) - val = dehumanize("10T", target="G") + val = dehumanize("10T", target="G") assert val == 10*(2**10) with pytest.raises(ValueError, @@ -390,7 +404,7 @@ def test_dehumanize(self): with pytest.raises(ValueError, match="could not convert string to float: 'invalid_val'"): val = dehumanize("invalid_valM") - + def test_signal_to_num(self): sig = signal_to_num("SIGKILL") assert sig == 9 From 7ebf64aea80b3eb3a7fe554c2248e5a5e0246082 Mon Sep 17 00:00:00 2001 From: tazend <75485188+tazend@users.noreply.github.com> Date: Fri, 8 Sep 2023 21:08:25 +0200 Subject: [PATCH 36/48] Add new attributes to db.Jobs and truncate_time option to db.JobFilter (#321) --- CHANGELOG.md | 8 +++ pyslurm/db/job.pxd | 61 +++++++++++++++++- pyslurm/db/job.pyx | 33 +++++++++- pyslurm/db/stats.pxd | 3 + pyslurm/db/stats.pyx | 143 ++++++++++++++++++++++++++----------------- 5 files changed, 185 insertions(+), 63 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aad36508..7b83eb65 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - New Classes to interact with Database QoS (WIP) - 
`pyslurm.db.QualityOfService` - `pyslurm.db.QualitiesOfService` +- Add `truncate_time` option to `pyslurm.db.JobFilter`, which is the same as -T / + --truncate from sacct. +- Add new Attributes to `pyslurm.db.Jobs` that help gathering statistics for a + collection of Jobs more convenient. +- Fix `allocated_gres` attribute in the `pyslurm.Node` Class returning nothing. +- Add new `idle_memory` and `allocated_tres` attributes to `pyslurm.Node` class +- Fix Node State being displayed as `ALLOCATED` when it should actually be + `MIXED`. ## [23.2.2](https://github.com/PySlurm/pyslurm/releases/tag/v23.2.2) - 2023-07-18 diff --git a/pyslurm/db/job.pxd b/pyslurm/db/job.pxd index 70ef0311..a06791bf 100644 --- a/pyslurm/db/job.pxd +++ b/pyslurm/db/job.pxd @@ -123,6 +123,12 @@ cdef class JobFilter: Instruct the slurmdbd to also send the job environment(s) Note: This requires specifying explictiy job ids, and is mutually exclusive with `with_script` + truncate_time (bool): + Truncate start and end time. + For example, when a Job has actually started before the requested + `start_time`, the time will be truncated to `start_time`. Same + logic applies for `end_time`. This is like the `-T` / `--truncate` + option from `sacct`. """ cdef slurmdb_job_cond_t *ptr @@ -149,11 +155,60 @@ cdef class JobFilter: nodelist with_script with_env + truncate_time cdef class Jobs(MultiClusterMap): - """A [`Multi Cluster`][pyslurm.xcollections.MultiClusterMap] collection of [pyslurm.db.Job][] objects.""" - pass + """A [`Multi Cluster`][pyslurm.xcollections.MultiClusterMap] collection of [pyslurm.db.Job][] objects. + + Args: + jobs (Union[list[int], dict[int, pyslurm.db.Job], str], optional=None): + Jobs to initialize this collection with. + + Attributes: + consumed_energy (int): + Total amount of energy consumed, in joules. + disk_read (int): + Total amount of bytes read. + disk_write (int): + Total amount of bytes written. + page_faults (int): + Total amount of page faults. 
+ resident_memory (int): + Total Resident Set Size (RSS) used in bytes. + virtual_memory (int): + Total Virtual Memory Size (VSZ) used in bytes. + elapsed_cpu_time (int): + Total amount of time used (Elapsed time * cpu count) in seconds. + This is not the real CPU-Efficiency, but rather the total amount + of cpu-time the CPUs were occupied for. + total_cpu_time (int): + Sum of `user_cpu_time` and `system_cpu_time`, in seconds + user_cpu_time (int): + Total amount of Time spent in user space, in seconds + system_cpu_time (int): + Total amount of Time spent in kernel space, in seconds + cpus (int): + Total amount of cpus. + nodes (int): + Total amount of nodes. + memory (int): + Total amount of requested memory in Mebibytes. + """ + cdef public: + consumed_energy + disk_read + disk_write + page_faults + resident_memory + virtual_memory + elapsed_cpu_time + total_cpu_time + user_cpu_time + system_cpu_time + cpus + nodes + memory cdef class Job: @@ -252,7 +307,7 @@ cdef class Job: Amount of CPUs the Job has/had allocated, or, if the Job is still pending, this will reflect the amount requested. 
memory (int): - Amount of memory the Job requested in total + Amount of memory the Job requested in total, in Mebibytes reservation (str): Name of the Reservation for this Job script (str): diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index beea1861..0457e1fa 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -29,6 +29,10 @@ from typing import Any from pyslurm.utils.uint import * from pyslurm.settings import LOCAL_CLUSTER from pyslurm import xcollections +from pyslurm.db.stats import ( + reset_stats_for_job_collection, + add_stats_to_job_collection, +) from pyslurm.utils.ctime import ( date_to_timestamp, timestr_to_mins, @@ -146,6 +150,9 @@ cdef class JobFilter: if self.nodelist: cstr.fmalloc(&ptr.used_nodes, nodelist_to_range_str(self.nodelist)) + + if self.truncate_time: + ptr.flags &= ~slurm.JOBCOND_FLAG_NO_TRUNC if self.ids: # These are only allowed by the slurmdbd when specific jobs are @@ -196,6 +203,7 @@ cdef class Jobs(MultiClusterMap): val_type=Job, id_attr=Job.id, key_type=int) + self._reset_stats() @staticmethod def load(JobFilter db_filter=None, Connection db_connection=None): @@ -275,15 +283,35 @@ cdef class Jobs(MultiClusterMap): job = Job.from_ptr(job_ptr.data) job.qos_data = qos_data job._create_steps() - JobStatistics._sum_step_stats_for_job(job, job.steps) + job.stats = JobStatistics.from_job_steps(job) cluster = job.cluster if cluster not in out.data: out.data[cluster] = {} out[cluster][job.id] = job + add_stats_to_job_collection(out, job.stats) + out.cpus += job.cpus + out.nodes += job.num_nodes + out.memory += job.memory + return out + def _reset_stats(self): + reset_stats_for_job_collection(self) + self.cpus = 0 + self.nodes = 0 + self.memory = 0 + + def calc_stats(self): + """(Re)Calculate Statistics for the Job Collection.""" + self._reset_stats() + for job in self.values(): + add_stats_to_job_collection(self, job.stats) + self.cpus += job.cpus + self.nodes += job.num_nodes + self.memory += job.memory + @staticmethod def 
modify(db_filter, Job changes, db_connection=None): """Modify Slurm database Jobs. @@ -445,7 +473,6 @@ cdef class Job: cdef Job wrap = Job.__new__(Job) wrap.ptr = in_ptr wrap.steps = JobSteps.__new__(JobSteps) - wrap.stats = JobStatistics() return wrap @staticmethod @@ -738,7 +765,7 @@ cdef class Job: else: # Job is still pending, so we return the number of requested cpus # instead. - return u32_parse(self.ptr.req_cpus) + return u32_parse(self.ptr.req_cpus, on_noval=0, zero_is_noval=False) @property def memory(self): diff --git a/pyslurm/db/stats.pxd b/pyslurm/db/stats.pxd index 1ca9c701..5615b2c3 100644 --- a/pyslurm/db/stats.pxd +++ b/pyslurm/db/stats.pxd @@ -139,6 +139,9 @@ cdef class JobStatistics: user_cpu_time system_cpu_time + @staticmethod + cdef JobStatistics from_job_steps(Job job) + @staticmethod cdef JobStatistics from_step(JobStep step) diff --git a/pyslurm/db/stats.pyx b/pyslurm/db/stats.pyx index 7bbb2a8a..c2da1145 100644 --- a/pyslurm/db/stats.pyx +++ b/pyslurm/db/stats.pyx @@ -28,6 +28,32 @@ from pyslurm.utils.helpers import ( ) +def reset_stats_for_job_collection(jobs): + jobs.consumed_energy = 0 + jobs.disk_read = 0 + jobs.disk_write = 0 + jobs.page_faults = 0 + jobs.resident_memory = 0 + jobs.virtual_memory = 0 + jobs.elapsed_cpu_time = 0 + jobs.total_cpu_time = 0 + jobs.user_cpu_time = 0 + jobs.system_cpu_time = 0 + + +def add_stats_to_job_collection(jobs, JobStatistics js): + jobs.consumed_energy += js.consumed_energy + jobs.disk_read += js.avg_disk_read + jobs.disk_write += js.avg_disk_write + jobs.page_faults += js.avg_page_faults + jobs.resident_memory += js.avg_resident_memory + jobs.virtual_memory += js.avg_virtual_memory + jobs.elapsed_cpu_time += js.elapsed_cpu_time + jobs.total_cpu_time += js.total_cpu_time + jobs.user_cpu_time += js.user_cpu_time + jobs.system_cpu_time += js.system_cpu_time + + cdef class JobStatistics: def __init__(self): @@ -50,6 +76,21 @@ cdef class JobStatistics: def to_dict(self): return instance_to_dict(self) + 
@staticmethod + cdef JobStatistics from_job_steps(Job job): + cdef JobStatistics job_stats = JobStatistics() + + for step in job.steps.values(): + job_stats._add_base_stats(step.stats) + + job_stats._sum_cpu_time(job) + + step_count = len(job.steps) + if step_count: + job_stats.avg_cpu_frequency /= step_count + + return job_stats + @staticmethod cdef JobStatistics from_step(JobStep step): cdef JobStatistics wrap = JobStatistics() @@ -140,68 +181,56 @@ cdef class JobStatistics: return wrap - @staticmethod - def _sum_step_stats_for_job(Job job, JobSteps steps): - cdef: - JobStatistics job_stats = job.stats - JobStatistics step_stats = None - - for step in steps.values(): - step_stats = step.stats - - job_stats.consumed_energy += step_stats.consumed_energy - job_stats.avg_cpu_time += step_stats.avg_cpu_time - job_stats.avg_cpu_frequency += step_stats.avg_cpu_frequency - job_stats.avg_disk_read += step_stats.avg_disk_read - job_stats.avg_disk_write += step_stats.avg_disk_write - job_stats.avg_page_faults += step_stats.avg_page_faults - - if step_stats.max_disk_read >= job_stats.max_disk_read: - job_stats.max_disk_read = step_stats.max_disk_read - job_stats.max_disk_read_node = step_stats.max_disk_read_node - job_stats.max_disk_read_task = step_stats.max_disk_read_task - - if step_stats.max_disk_write >= job_stats.max_disk_write: - job_stats.max_disk_write = step_stats.max_disk_write - job_stats.max_disk_write_node = step_stats.max_disk_write_node - job_stats.max_disk_write_task = step_stats.max_disk_write_task - - if step_stats.max_page_faults >= job_stats.max_page_faults: - job_stats.max_page_faults = step_stats.max_page_faults - job_stats.max_page_faults_node = step_stats.max_page_faults_node - job_stats.max_page_faults_task = step_stats.max_page_faults_task - - if step_stats.max_resident_memory >= job_stats.max_resident_memory: - job_stats.max_resident_memory = step_stats.max_resident_memory - job_stats.max_resident_memory_node = step_stats.max_resident_memory_node 
- job_stats.max_resident_memory_task = step_stats.max_resident_memory_task - job_stats.avg_resident_memory = job_stats.max_resident_memory - - if step_stats.max_virtual_memory >= job_stats.max_virtual_memory: - job_stats.max_virtual_memory = step_stats.max_virtual_memory - job_stats.max_virtual_memory_node = step_stats.max_virtual_memory_node - job_stats.max_virtual_memory_task = step_stats.max_virtual_memory_task - job_stats.avg_virtual_memory = job_stats.max_virtual_memory - - if step_stats.min_cpu_time >= job_stats.min_cpu_time: - job_stats.min_cpu_time = step_stats.min_cpu_time - job_stats.min_cpu_time_node = step_stats.min_cpu_time_node - job_stats.min_cpu_time_task = step_stats.min_cpu_time_task - + def _add_base_stats(self, JobStatistics src): + self.consumed_energy += src.consumed_energy + self.avg_cpu_time += src.avg_cpu_time + self.avg_cpu_frequency += src.avg_cpu_frequency + self.avg_disk_read += src.avg_disk_read + self.avg_disk_write += src.avg_disk_write + self.avg_page_faults += src.avg_page_faults + + if src.max_disk_read >= self.max_disk_read: + self.max_disk_read = src.max_disk_read + self.max_disk_read_node = src.max_disk_read_node + self.max_disk_read_task = src.max_disk_read_task + + if src.max_disk_write >= self.max_disk_write: + self.max_disk_write = src.max_disk_write + self.max_disk_write_node = src.max_disk_write_node + self.max_disk_write_task = src.max_disk_write_task + + if src.max_page_faults >= self.max_page_faults: + self.max_page_faults = src.max_page_faults + self.max_page_faults_node = src.max_page_faults_node + self.max_page_faults_task = src.max_page_faults_task + + if src.max_resident_memory >= self.max_resident_memory: + self.max_resident_memory = src.max_resident_memory + self.max_resident_memory_node = src.max_resident_memory_node + self.max_resident_memory_task = src.max_resident_memory_task + self.avg_resident_memory = self.max_resident_memory + + if src.max_virtual_memory >= self.max_virtual_memory: + 
self.max_virtual_memory = src.max_virtual_memory + self.max_virtual_memory_node = src.max_virtual_memory_node + self.max_virtual_memory_task = src.max_virtual_memory_task + self.avg_virtual_memory = self.max_virtual_memory + + if src.min_cpu_time >= self.min_cpu_time: + self.min_cpu_time = src.min_cpu_time + self.min_cpu_time_node = src.min_cpu_time_node + self.min_cpu_time_task = src.min_cpu_time_task + + def _sum_cpu_time(self, Job job): if job.ptr.tot_cpu_sec != slurm.NO_VAL64: - job_stats.total_cpu_time = job.ptr.tot_cpu_sec + self.total_cpu_time += job.ptr.tot_cpu_sec if job.ptr.user_cpu_sec != slurm.NO_VAL64: - job_stats.user_cpu_time = job.ptr.user_cpu_sec + self.user_cpu_time += job.ptr.user_cpu_sec if job.ptr.sys_cpu_sec != slurm.NO_VAL64: - job_stats.system_cpu_time = job.ptr.sys_cpu_sec + self.system_cpu_time += job.ptr.sys_cpu_sec elapsed = job.elapsed_time if job.elapsed_time else 0 cpus = job.cpus if job.cpus else 0 - job_stats.elapsed_cpu_time = elapsed * cpus - - step_count = len(steps) - if step_count: - job_stats.avg_cpu_frequency /= step_count - + self.elapsed_cpu_time += elapsed * cpus From 2ee2696fa7c8c0299d405287791efe30f2f8e10c Mon Sep 17 00:00:00 2001 From: Andy Georges Date: Fri, 13 Oct 2023 14:06:35 +0200 Subject: [PATCH 37/48] bump: workflow image to ubuntu 22.04 LTS --- .github/workflows/pyslurm.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pyslurm.yml b/.github/workflows/pyslurm.yml index 86bcb9d8..4804edf3 100644 --- a/.github/workflows/pyslurm.yml +++ b/.github/workflows/pyslurm.yml @@ -2,7 +2,7 @@ name: PySlurm on: [push, pull_request] jobs: Build: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 strategy: matrix: python-version: From 5b7629850c2277ced435dc32d5657a524ed1e003 Mon Sep 17 00:00:00 2001 From: Andy Georges Date: Fri, 13 Oct 2023 14:08:09 +0200 Subject: [PATCH 38/48] bump: slurm version in docker images to 23.02.6 --- docker-compose-github.yml | 2 +- docker-compose.yml | 2 +- 
pyslurm.spec | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docker-compose-github.yml b/docker-compose-github.yml index 9087ab4b..f7da8c34 100644 --- a/docker-compose-github.yml +++ b/docker-compose-github.yml @@ -2,7 +2,7 @@ version: "3.8" services: slurm: - image: giovtorres/docker-centos7-slurm:21.08.6 + image: giovtorres/docker-centos7-slurm:23.02.6 hostname: slurmctl container_name: slurmctl stdin_open: true diff --git a/docker-compose.yml b/docker-compose.yml index 4061801c..345db5ae 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,7 +2,7 @@ version: "3.8" services: slurm: - image: giovtorres/docker-centos7-slurm:21.08.0 + image: giovtorres/docker-centos7-slurm:23.02.6 hostname: slurmctl container_name: slurmctl stdin_open: true diff --git a/pyslurm.spec b/pyslurm.spec index faccae97..b80d1acd 100644 --- a/pyslurm.spec +++ b/pyslurm.spec @@ -1,6 +1,6 @@ # SPEC file taken from https://centos.pkgs.org/7/puias-computational-x86_64/python-pyslurm-17.02-1.gitab899c6.sdl7.x86_64.rpm.html Name: pyslurm -Version: 22.05.1 +Version: 23.2.2 %global rel 1 Release: %{rel}%{gittag}%{?dist}.ug Summary: PySlurm: Slurm Interface for Python From 32d1e6999571d86f23028b7e7246ec7c6fe49f80 Mon Sep 17 00:00:00 2001 From: Andy Georges Date: Fri, 13 Oct 2023 14:12:29 +0200 Subject: [PATCH 39/48] bump: back to ubuntu 20.04 LTS because of py3.6 --- .github/workflows/pyslurm.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pyslurm.yml b/.github/workflows/pyslurm.yml index 4804edf3..86bcb9d8 100644 --- a/.github/workflows/pyslurm.yml +++ b/.github/workflows/pyslurm.yml @@ -2,7 +2,7 @@ name: PySlurm on: [push, pull_request] jobs: Build: - runs-on: ubuntu-22.04 + runs-on: ubuntu-20.04 strategy: matrix: python-version: From 141303f751162c1843271aa3c013bbc3199e40f7 Mon Sep 17 00:00:00 2001 From: Andy Georges Date: Fri, 13 Oct 2023 14:39:09 +0200 Subject: [PATCH 40/48] fix: other slurm image --- 
docker-compose-github.yml | 2 +- docker-compose.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker-compose-github.yml b/docker-compose-github.yml index f7da8c34..8d460a18 100644 --- a/docker-compose-github.yml +++ b/docker-compose-github.yml @@ -2,7 +2,7 @@ version: "3.8" services: slurm: - image: giovtorres/docker-centos7-slurm:23.02.6 + image: ghcr.io/itkovian/rocky8-slurm:main hostname: slurmctl container_name: slurmctl stdin_open: true diff --git a/docker-compose.yml b/docker-compose.yml index 345db5ae..2e1763ae 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,7 +2,7 @@ version: "3.8" services: slurm: - image: giovtorres/docker-centos7-slurm:23.02.6 + image: ghcr.io/itkovian/rocky8-slurm:main hostname: slurmctl container_name: slurmctl stdin_open: true From 3067a70b0975208f9a7871f34ff3fd3ff9665aea Mon Sep 17 00:00:00 2001 From: Andy Georges Date: Fri, 13 Oct 2023 19:35:14 +0200 Subject: [PATCH 41/48] bump: actions to newer version --- .github/workflows/pyslurm.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pyslurm.yml b/.github/workflows/pyslurm.yml index 86bcb9d8..d39e0e92 100644 --- a/.github/workflows/pyslurm.yml +++ b/.github/workflows/pyslurm.yml @@ -13,9 +13,9 @@ jobs: fail-fast: false steps: - name: Checkout repository code - uses: actions/checkout@v2 + uses: actions/checkout@main - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@main with: python-version: ${{ matrix.python-version }} - name: Install Dependencies From cff9c8af58d6c76c42c73e440d3a51a698fb04de Mon Sep 17 00:00:00 2001 From: Andy Georges Date: Tue, 17 Oct 2023 16:37:22 +0200 Subject: [PATCH 42/48] bump: python 3.9 version to 3.9.18 --- scripts/run_tests_in_container.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/run_tests_in_container.py b/scripts/run_tests_in_container.py index 4110bcfd..aab1a7f0 100644 --- 
a/scripts/run_tests_in_container.py +++ b/scripts/run_tests_in_container.py @@ -8,7 +8,7 @@ "3.6": "3.6.15", "3.7": "3.7.12", "3.8": "3.8.12", - "3.9": "3.9.9", + "3.9": "3.9.18", "3.10": "3.10.0", } From 3f94b5d79d13df8b74313c3fc6a91d7ab72279ac Mon Sep 17 00:00:00 2001 From: Andy Georges Date: Tue, 17 Oct 2023 16:56:11 +0200 Subject: [PATCH 43/48] fix: verbose building --- scripts/run_tests_in_container.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/run_tests_in_container.py b/scripts/run_tests_in_container.py index aab1a7f0..2328633f 100644 --- a/scripts/run_tests_in_container.py +++ b/scripts/run_tests_in_container.py @@ -17,7 +17,7 @@ def test_run(): host = testinfra.get_host(f"docker://slurmctl") python = f'python{os.environ.get("PYTHON")}' host.run(f'pyenv global {version_map[os.environ.get("PYTHON")]}') - print(host.check_output(f"{python} setup.py build")) + print(host.check_output(f"{python} setup.py build -v")) print(host.check_output(f"{python} setup.py install")) print(host.check_output("./scripts/configure.sh")) print(host.check_output(f"{python} -m pip uninstall --yes pytest")) From 7194d8295d15d0fe6c674541766d9711aea3781a Mon Sep 17 00:00:00 2001 From: Andy Georges Date: Wed, 10 Jan 2024 11:03:49 +0100 Subject: [PATCH 44/48] feat: support for building on el9 --- pyslurm.spec | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pyslurm.spec b/pyslurm.spec index b80d1acd..0f68e2f8 100644 --- a/pyslurm.spec +++ b/pyslurm.spec @@ -24,7 +24,11 @@ Source: %{pyslurm_source_dir}.tar.gz BuildRequires: python3-Cython, python36-devel %global usepython python3 %global usepython_sitearch %{python3_sitearch} -%else +%elif 0%{?rhel} == 9 +BuildRequires: python3-Cython, python39-devel +%global usepython python3 +%global usepython_sitearch %{python3_sitearch} +%%else BuildRequires: Cython, python-devel %global usepython python %global usepython_sitearch %{python_sitearch} From 43e242dea34bd45c5efc773a4234aa137c2b3ef8 
Mon Sep 17 00:00:00 2001 From: Andy Georges Date: Wed, 10 Jan 2024 11:08:46 +0100 Subject: [PATCH 45/48] fix: rpm is called python3-devel --- pyslurm.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyslurm.spec b/pyslurm.spec index 0f68e2f8..0dadbbd0 100644 --- a/pyslurm.spec +++ b/pyslurm.spec @@ -25,7 +25,7 @@ BuildRequires: python3-Cython, python36-devel %global usepython python3 %global usepython_sitearch %{python3_sitearch} %elif 0%{?rhel} == 9 -BuildRequires: python3-Cython, python39-devel +BuildRequires: python3-Cython, python3-devel %global usepython python3 %global usepython_sitearch %{python3_sitearch} %%else From b8d437b941b0fbb86c599227839d1cb08e8c75a3 Mon Sep 17 00:00:00 2001 From: Andy Georges Date: Wed, 10 Jan 2024 11:26:19 +0100 Subject: [PATCH 46/48] fix: remove cython upper bound verison limit --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index f6dd995d..1cb07d7a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,6 +3,6 @@ requires = [ "setuptools==59.2.0", "wheel==0.37.0", - "Cython>=0.29.30,<3.0", + "Cython>=0.29.30", ] From 2482522cd5b50ce225026971a21f2d258cd7f1d2 Mon Sep 17 00:00:00 2001 From: Andy Georges Date: Wed, 10 Jan 2024 11:27:36 +0100 Subject: [PATCH 47/48] fix: remove exceprtion raise --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index afd8a2eb..f3fedd27 100644 --- a/setup.py +++ b/setup.py @@ -262,7 +262,7 @@ def cythongen(): else: if LooseVersion(cython_version) < LooseVersion(CYTHON_VERSION_MIN): msg = f"Please use Cython version >= {CYTHON_VERSION_MIN}" - raise RuntimeError(msg) + #raise RuntimeError(msg) # Clean up temporary build objects first From 87c3ce874936cbbac9cdd9a8827c605fcf9bcb7c Mon Sep 17 00:00:00 2001 From: Bruno Travouillon Date: Fri, 13 Oct 2023 07:15:38 -0400 Subject: [PATCH 48/48] Remove undeclared KILL_JOB_ARRAY (#325) KILL_JOB_ARRAY was renamed to 
KILL_ARRAY_TASK. Remove it since it is not used anywhere in the code. See https://github.com/SchedMD/slurm/commit/987557a26e547978cfe795d14d9887c4854008ae Fixes: PySlurm/pyslurm#324 --- pyslurm/pydefines/slurm_defines.pxi | 1 - pyslurm/slurm/slurm.h.pxi | 1 - 2 files changed, 2 deletions(-) diff --git a/pyslurm/pydefines/slurm_defines.pxi b/pyslurm/pydefines/slurm_defines.pxi index f700a839..b741d382 100644 --- a/pyslurm/pydefines/slurm_defines.pxi +++ b/pyslurm/pydefines/slurm_defines.pxi @@ -409,7 +409,6 @@ TRIGGER_TYPE_PRI_DB_RES_OP = slurm.TRIGGER_TYPE_PRI_DB_RES_OP TRIGGER_TYPE_BURST_BUFFER = slurm.TRIGGER_TYPE_BURST_BUFFER KILL_JOB_BATCH = slurm.KILL_JOB_BATCH -KILL_JOB_ARRAY = slurm.KILL_JOB_ARRAY KILL_STEPS_ONLY = slurm.KILL_STEPS_ONLY KILL_FULL_JOB = slurm.KILL_FULL_JOB KILL_FED_REQUEUE = slurm.KILL_FED_REQUEUE diff --git a/pyslurm/slurm/slurm.h.pxi b/pyslurm/slurm/slurm.h.pxi index 3605e5a7..e7d89ad5 100644 --- a/pyslurm/slurm/slurm.h.pxi +++ b/pyslurm/slurm/slurm.h.pxi @@ -464,7 +464,6 @@ cdef extern from "slurm/slurm.h": uint8_t ASSOC_MGR_INFO_FLAG_USERS uint8_t ASSOC_MGR_INFO_FLAG_QOS uint8_t KILL_JOB_BATCH - uint8_t KILL_JOB_ARRAY uint8_t KILL_STEPS_ONLY uint8_t KILL_FULL_JOB uint8_t KILL_FED_REQUEUE