diff --git a/.gitignore b/.gitignore index dc6a0db..d3cbe9a 100644 --- a/.gitignore +++ b/.gitignore @@ -173,3 +173,8 @@ ispypsa_runs/**/*.hdf5 # ignore doit database .doit* + +# repomix for ai coding +.repomixignore +repomix-output.md +repomix.config.json diff --git a/demo_flow_path_costs.py b/demo_flow_path_costs.py new file mode 100644 index 0000000..6ee2037 --- /dev/null +++ b/demo_flow_path_costs.py @@ -0,0 +1,41 @@ +""" +Demo script to test the flow path cost templating function. +""" + +from pathlib import Path + +from ispypsa.data_fetch import read_csvs +from ispypsa.logging import configure_logging +from ispypsa.templater.flow_paths import _template_sub_regional_flow_path_costs + +configure_logging() + + +def main(): + """Run the demo.""" + # Define root folder for data + root_folder = Path("ispypsa_runs") + workbook_cache_dir = root_folder / "workbook_table_cache" + + print("Loading test data...") + iasr_tables = read_csvs(workbook_cache_dir) + print(f"Loaded {len(iasr_tables)} tables") + + # Process each scenario + scenarios = ["Step Change", "Progressive Change", "Green Energy Exports"] + + for scenario in scenarios: + results = _template_sub_regional_flow_path_costs(iasr_tables, scenario) + print(f"Found {len(results['flow_path'].unique())} flow paths") + print("\nSample results:") + print(results) + + # Save results to CSV + scenario_name = scenario.lower().replace(" ", "_") + output_file = Path(f"flow_path_costs_{scenario_name}.csv") + results.to_csv(output_file, index=False) + print(f"\nResults saved to: {output_file}") + + +if __name__ == "__main__": + main() diff --git a/example_workflow.py b/example_workflow.py index c9f2ce2..937609c 100644 --- a/example_workflow.py +++ b/example_workflow.py @@ -100,7 +100,6 @@ operational_timeseries_location, ) - network.optimize.fix_optimal_capacities() # Never use network.optimize() as this will remove custom constraints. 
diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 0000000..60ae4c7 --- /dev/null +++ b/package-lock.json @@ -0,0 +1,6 @@ +{ + "name": "ISPyPSA", + "lockfileVersion": 3, + "requires": true, + "packages": {} +} diff --git a/package.json b/package.json new file mode 100644 index 0000000..0967ef4 --- /dev/null +++ b/package.json @@ -0,0 +1 @@ +{} diff --git a/pyproject.toml b/pyproject.toml index c03a857..c7282eb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,14 +10,14 @@ authors = [ dependencies = [ "pandas>=2.2.2", "pypsa>=0.31.1", - "isp-workbook-parser>=2.4.1", "pyyaml>=6.0.2", "doit>=0.36.0", "xmltodict>=0.13.0", "thefuzz>=0.22.1", - "isp-trace-parser>=1.0.0", "pyarrow>=18.0.0", "tables>=3.10.1", + "isp-trace-parser>=1.0.3", + "isp-workbook-parser>=2.5.0", ] readme = "README.md" requires-python = ">= 3.10" diff --git a/src/ispypsa/iasr_table_caching/local_cache.py b/src/ispypsa/iasr_table_caching/local_cache.py index 359aab7..952aede 100644 --- a/src/ispypsa/iasr_table_caching/local_cache.py +++ b/src/ispypsa/iasr_table_caching/local_cache.py @@ -2,7 +2,16 @@ from isp_workbook_parser import Parser -from ..templater.mappings import _GENERATOR_PROPERTIES +from ..templater.mappings import ( + _ACTIONABLE_ISP_PROJECTS_TABLES, + _FLOW_PATH_AGUMENTATION_TABLES, + _FLOW_PATH_AUGMENTATION_COST_TABLES, + _GENERATOR_PROPERTIES, + _PREPATORY_ACTIVITIES_TABLES, + _REZ_AUGMENTATION_COST_TABLES, + _REZ_CONNECTION_AGUMENTATION_TABLES, + _REZ_CONNECTION_PREPATORY_ACTIVITIES_TABLES, +) _GENERATOR_PROPERTY_TABLES = [ table_name @@ -34,6 +43,17 @@ "initial_build_limits", ] +_NETWORK_REQUIRED_TABLES = ( + _NETWORK_REQUIRED_TABLES + + _FLOW_PATH_AGUMENTATION_TABLES + + _FLOW_PATH_AUGMENTATION_COST_TABLES + + _PREPATORY_ACTIVITIES_TABLES + + _ACTIONABLE_ISP_PROJECTS_TABLES + + _REZ_CONNECTION_AGUMENTATION_TABLES + + _REZ_AUGMENTATION_COST_TABLES + + _REZ_CONNECTION_PREPATORY_ACTIVITIES_TABLES +) + _GENERATORS_STORAGE_REQUIRED_SUMMARY_TABLES = [ 
"existing_generators_summary", "committed_generators_summary", diff --git a/src/ispypsa/model/build.py b/src/ispypsa/model/build.py index 18ce04a..13e117c 100644 --- a/src/ispypsa/model/build.py +++ b/src/ispypsa/model/build.py @@ -60,28 +60,29 @@ def build_pypsa_network( network, pypsa_friendly_tables["buses"], path_to_pypsa_friendly_timeseries_data ) - if not pypsa_friendly_tables["custom_constraints_generators"].empty: - _add_bus_for_custom_constraints(network) - _add_lines_to_network(network, pypsa_friendly_tables["lines"]) - _add_custom_constraint_generators_to_network( - network, pypsa_friendly_tables["custom_constraints_generators"] - ) - _add_generators_to_network( network, pypsa_friendly_tables["generators"], path_to_pypsa_friendly_timeseries_data, ) + if "custom_constraints_generators" in pypsa_friendly_tables.keys(): + _add_bus_for_custom_constraints(network) + + _add_custom_constraint_generators_to_network( + network, pypsa_friendly_tables["custom_constraints_generators"] + ) + # The underlying linopy model needs to get built so we can add custom constraints. 
network.optimize.create_model() - _add_custom_constraints( - network, - pypsa_friendly_tables["custom_constraints_rhs"], - pypsa_friendly_tables["custom_constraints_lhs"], - ) + if "custom_constraints_rhs" in pypsa_friendly_tables: + _add_custom_constraints( + network, + pypsa_friendly_tables["custom_constraints_rhs"], + pypsa_friendly_tables["custom_constraints_lhs"], + ) return network diff --git a/src/ispypsa/templater/create_template.py b/src/ispypsa/templater/create_template.py index 8956fcb..aec0213 100644 --- a/src/ispypsa/templater/create_template.py +++ b/src/ispypsa/templater/create_template.py @@ -10,6 +10,8 @@ ) from ispypsa.templater.flow_paths import ( _template_regional_interconnectors, + _template_rez_transmission_costs, + _template_sub_regional_flow_path_costs, _template_sub_regional_flow_paths, ) from ispypsa.templater.nodes import ( @@ -98,12 +100,8 @@ def create_ispypsa_inputs_template( Returns: dictionary of dataframes in the `ISPyPSA` format. (add link to ispypsa table docs) """ - template = {} - transmission_expansion_costs = manually_extracted_tables.pop( - "transmission_expansion_costs" - ) template.update(manually_extracted_tables) if regional_granularity == "sub_regions": @@ -112,7 +110,11 @@ def create_ispypsa_inputs_template( ) template["flow_paths"] = _template_sub_regional_flow_paths( - iasr_tables["flow_path_transfer_capability"], transmission_expansion_costs + iasr_tables["flow_path_transfer_capability"] + ) + + template["flow_path_expansion_costs"] = _template_sub_regional_flow_path_costs( + iasr_tables, scenario ) elif regional_granularity == "nem_regions": @@ -137,6 +139,20 @@ def create_ispypsa_inputs_template( iasr_tables["initial_build_limits"] ) + possible_rez_or_constraint_names = list( + set( + list(template["renewable_energy_zones"]["rez_id"]) + + list(template["rez_group_constraints_rhs"]["constraint_id"]) + + list(template["rez_transmission_limit_constraints_rhs"]["constraint_id"]) + ) + ) + + 
template["rez_transmission_expansion_costs"] = _template_rez_transmission_costs( + iasr_tables, + scenario, + possible_rez_or_constraint_names, + ) + template["ecaa_generators"] = _template_ecaa_generators_static_properties( iasr_tables ) diff --git a/src/ispypsa/templater/flow_paths.py b/src/ispypsa/templater/flow_paths.py index 477c14c..19a793e 100644 --- a/src/ispypsa/templater/flow_paths.py +++ b/src/ispypsa/templater/flow_paths.py @@ -1,17 +1,22 @@ import logging import re -from pathlib import Path import pandas as pd from .helpers import ( + _fuzzy_match_names, _snakecase_string, + _strip_all_text_after_numeric_value, +) +from .mappings import ( + _FLOW_PATH_CONFIG, + _HVDC_FLOW_PATHS, + _REZ_CONFIG, ) -from .mappings import _HVDC_FLOW_PATHS def _template_sub_regional_flow_paths( - flow_path_capabilities: pd.DataFrame, transmission_expansion_costs: pd.DataFrame + flow_path_capabilities: pd.DataFrame, ) -> pd.DataFrame: """Processes the 'Flow path transfer capability' table into an ISPyPSA template format. @@ -19,8 +24,6 @@ def _template_sub_regional_flow_paths( Args: flow_path_capabilities: pd.DataFrame IASR table specifying the flow path transfer capabilities between subregions - transmission_expansion_costs: pd.DataFrame specifying the transmission - expansion costs for each flow path. Returns: `pd.DataFrame`: ISPyPSA sub-regional flow path template @@ -33,7 +36,7 @@ def _template_sub_regional_flow_paths( # Only keep forward_direction_mw_summer_typical limit col as that all that's # being used for now. 
cols = [ - "flow_path_name", + "flow_path", "node_from", "node_to", "carrier", @@ -41,13 +44,6 @@ def _template_sub_regional_flow_paths( ] sub_regional_capabilities = sub_regional_capabilities.loc[:, cols] - sub_regional_capabilities = pd.merge( - sub_regional_capabilities, - transmission_expansion_costs, - how="left", - on="flow_path_name", - ) - return sub_regional_capabilities @@ -72,7 +68,7 @@ def _template_regional_interconnectors( # Only keep forward_direction_mw_summer_typical limit col as that all that's # being used for now. cols = [ - "flow_path_name", + "flow_path", "node_from", "node_to", "carrier", @@ -107,17 +103,14 @@ def _get_flow_path_name_from_to_carrier( from_to_desc["carrier"] = from_to_desc.apply( lambda row: "DC" if any( - [ - dc_line in row["descriptor"] - for dc_line in _HVDC_FLOW_PATHS["flow_path_name"] - ] + [dc_line in row["descriptor"] for dc_line in _HVDC_FLOW_PATHS["flow_path"]] ) # manually detect Basslink since the name is not in the descriptor or (row["node_from"] == "TAS" and row["node_to"] == "VIC") else "AC", axis=1, ) - from_to_desc["flow_path_name"] = from_to_desc.apply( + from_to_desc["flow_path"] = from_to_desc.apply( lambda row: _determine_flow_path_name( row.node_from, row.node_to, @@ -150,7 +143,7 @@ def _determine_flow_path_name( name = _HVDC_FLOW_PATHS.loc[ (_HVDC_FLOW_PATHS.node_from == node_from) & (_HVDC_FLOW_PATHS.node_to == node_to), - "flow_path_name", + "flow_path", ].iat[0] elif descriptor and ( match := re.search( @@ -184,3 +177,686 @@ def _clean_capability_column_names(capability_df: pd.DataFrame) -> pd.DataFrame: col_name = _snakecase_string(direction + " (MW) " + qualifier) capability_columns.append(capability_df[col].rename(col_name)) return pd.concat(capability_columns, axis=1) + + +def _template_sub_regional_flow_path_costs( + iasr_tables: dict[str, pd.DataFrame], scenario: str +) -> pd.DataFrame: + """ + Process flow path augmentation options and cost forecasts to find the least cost + options for each 
flow path, return results in `ISPyPSA` format. + + Args: + iasr_tables: dict[str, pd.DataFrame] specifying IASR tables. Relevant DataFrames + include: + - Augmentation tables: columns include 'flow_path', 'option_name', + 'transfer_increase_forward_direction_mw', and + 'transfer_increase_reverse_direction_mw' + - Cost tables: columns include 'flow_path', 'option_name', and + financial year columns + - Preparatory activities: columns include 'flow_path', and financial + year columns + - Actionable projects: columns include 'flow_path', and financial year + columns + + Returns: + pd.DataFrame containing the least cost option for each flow path. Columns: + - flow_path + - option_name + - nominal_flow_limit_increase_mw + - _$/mw (one column per year, e.g., '2024_25_$/mw') + """ + return process_transmission_costs( + iasr_tables=iasr_tables, scenario=scenario, config=_FLOW_PATH_CONFIG + ) + + +def _template_rez_transmission_costs( + iasr_tables: dict[str, pd.DataFrame], + scenario: str, + possible_rez_or_constraint_names, +) -> pd.DataFrame: + """ + Process REZ augmentation options and cost forecasts to find least cost options for + each REZ, return results in `ISPyPSA` format. + + Args: + iasr_tables: dict[str, pd.DataFrame] specifying IASR tables. Relevant DataFrames + include: + - Augmentation tables: columns include 'rez_constraint_id', 'option', + and 'additional_network_capacity_mw' + - Cost tables: columns include 'rez_constraint_id', 'option', and + columns for each financial year (e.g., '2024-25', '2025-26', ...) + scenario: str specifying the scenario name (e.g., "Step Change", + "Progressive Change"). + possible_rez_or_constraint_names: list of possible names that cost data should + map to. The cost data is known to contain typos so the names in the cost + data are fuzzy match to the names provided in this input variable. + + Returns: + pd.DataFrame containing the least cost option for each REZ. 
Columns: + - rez_constraint_id + - option + - additional_network_capacity_mw + - _$/mw (cost per MW for each year, e.g., '2024_25_$/mw') + """ + rez_costs = process_transmission_costs( + iasr_tables=iasr_tables, scenario=scenario, config=_REZ_CONFIG + ) + rez_costs["rez_constraint_id"] = _fuzzy_match_names( + rez_costs["rez_constraint_id"], + possible_rez_or_constraint_names, + task_desc="Processing rez transmission costs", + ) + return rez_costs + + +def process_transmission_costs( + iasr_tables: dict[str, pd.DataFrame], scenario: str, config: dict +) -> pd.DataFrame: + """ + Generic function to process transmission costs (flow path or REZ). + + Args: + iasr_tables: dict[str, pd.DataFrame] specifying IASR tables + scenario: str specifying the scenario name + config: dict with processing configuration containing: + - transmission_type: str, either "flow_path" or "rez" + - in_coming_column_mappings: dict mapping standard column names to + rez or flow path specific names + - table_names: dict with augmentation and cost table lists + - mappings: dict with mappings for preparatory activities and other data + + Returns: + pd.DataFrame containing the least cost options with standardized column + structure + """ + cost_scenario = _determine_cost_scenario(scenario) + + # Get and process augmentation table + aug_table = _get_augmentation_table(iasr_tables=iasr_tables, config=config) + + # Get and process cost table + cost_table = _get_cost_table( + iasr_tables=iasr_tables, cost_scenario=cost_scenario, config=config + ) + + # Find the least cost options + final_costs = _get_least_cost_options( + aug_table=aug_table, cost_table=cost_table, config=config + ) + + return final_costs + + +def _determine_cost_scenario(scenario: str) -> str: + """ + Map ISP scenario to flow path/rez cost scenario. + + Args: + scenario: str specifying the scenario name. Must be one of "Step Change", + "Green Energy Exports", or "Progressive Change". 
+ + Returns: + str specifying the internal scenario key (e.g., + "step_change_and_green_energy_exports" or "progressive_change"). + """ + if scenario in ["Step Change", "Green Energy Exports"]: + return "step_change_and_green_energy_exports" + elif scenario == "Progressive Change": + return "progressive_change" + else: + raise ValueError(f"scenario: {scenario} not recognised.") + + +def _get_augmentation_table( + iasr_tables: dict[str, pd.DataFrame], config: dict +) -> pd.DataFrame: + """ + Concatenate and clean all augmentation tables for a given transmission type. + + Args: + iasr_tables: dict[str, pd.DataFrame] specifying IASR tables. Relevant tables + must contain columns: + - id (flow_path or rez_constraint_id) + - option (option_name or option) + - capacity (nominal_flow_limit_increase_mw or + additional_network_capacity_mw) + config: dict with processing configuration containing: + - in_coming_column_mappings: dict mapping standard column names to type-specific names + - table_names: dict with augmentation table lists + + Returns: + pd.DataFrame containing the concatenated augmentation table. Columns: + - id (flow_path or rez_constraint_id) + - option (option_name or option) + - capacity (nominal_flow_limit_increase_mw or additional_network_capacity_mw) + """ + table_names = config["table_names"]["augmentation"] + missing = [t for t in table_names if t not in iasr_tables] + if missing: + logging.warning(f"Missing augmentation tables: {missing}") + aug_tables = [ + iasr_tables[table_name] + for table_name in table_names + if table_name in iasr_tables + ] + if not aug_tables: + raise ValueError( + f"No {config['transmission_type']} augmentation tables found in iasr_tables." 
+ ) + aug_table = pd.concat(aug_tables, ignore_index=True) + aug_table = _clean_augmentation_table_column_names(aug_table, config) + aug_table = _clean_augmentation_table_column_values(aug_table, config) + return aug_table + + +def _get_cost_table( + iasr_tables: dict[str, pd.DataFrame], cost_scenario: str, config: dict +) -> pd.DataFrame: + """ + Combine all cost tables, preparatory activities, and actionable projects for a given + scenario into a single DataFrame. + + Args: + iasr_tables: dict[str, pd.DataFrame] specifying IASR tables. Cost tables must + have columns: + - id (flow_path or rez_constraint_id) + - option (Option Name or Option) + - (e.g., '2024-25', ...) + cost_scenario: str specifying the cost scenario name. + config: dict with processing configuration containing: + - transmission_type: str, either "flow_path" or "rez" + - column_mappings: dict mapping standard column names to rez/flow path names + - table_names: dict with cost table lists + - mappings: dict with option name mappings for preparatory activities and + actionable isp data + + Returns: + pd.DataFrame containing the combined cost table. Columns: + - id (flow_path or rez_constraint_id) + - option (option_name or option) + - (e.g., '2024_25', ...) + """ + cost_table_names = config["table_names"]["cost"][cost_scenario] + cost_table = _get_cleaned_cost_tables(iasr_tables, cost_table_names, config) + prep_activities = _get_prep_activities_table(iasr_tables, cost_scenario, config) + actionable_projects = _get_actionable_projects_table( + iasr_tables, cost_scenario, config + ) + return _combine_cost_tables(cost_table, prep_activities, actionable_projects) + + +def _get_least_cost_options( + aug_table: pd.DataFrame, cost_table: pd.DataFrame, config: dict +) -> pd.DataFrame: + """ + For each transmission, select the augmentation option with the lowest cost per MW of + increased capacity, using the first year with complete costs for all options.
The + selected option and its costs per MW are used for all years. + + Args: + aug_table: pd.DataFrame containing columns: + - id (flow_path or rez_constraint_id) + - option (option_name or option) + - capacity (nominal_flow_limit_increase_mw or additional_network_capacity_mw) + cost_table: pd.DataFrame containing columns: + - id (flow_path or rez_constraint_id) + - option (option_name or option) + - (e.g., '2024_25', ...) + config: dict with processing configuration containing: + - transmission_type: str, either "flow_path" or "rez" + - in_coming_column_mappings: dict mapping standard column names to + type-specific names + + Returns: + pd.DataFrame containing columns: + - id (flow_path or rez_constraint_id) + - option (option_name or option) + - capacity (nominal_flow_limit_increase_mw or additional_network_capacity_mw) + - _$/mw (cost per MW for each year, e.g., '2024_25_$/mw') + """ + year_cols = _get_year_columns(cost_table) + valid_costs_df = _find_first_year_with_complete_costs(cost_table, year_cols) + valid_costs_df["option"] = _fuzzy_match_names( + valid_costs_df["option"], + aug_table["option"], + "matching transmission augmentation options and costs", + not_match="existing", + threshold=80, + ) + transmission_analysis = pd.merge( + aug_table, valid_costs_df, on=["id", "option"], how="inner" + ) + _log_unmatched_transmission_options( + aug_table, valid_costs_df, transmission_analysis + ) + transmission_analysis["cost_per_mw"] = ( + transmission_analysis["cost"] + / transmission_analysis["nominal_capacity_increase"] + ) + least_cost_options = transmission_analysis.loc[ + transmission_analysis.groupby("id")["cost_per_mw"].idxmin() + ] + final_costs = pd.merge( + cost_table, + least_cost_options[["id", "option", "nominal_capacity_increase"]], + on=["id", "option"], + how="inner", + ) + # Divide each financial year column by capacity and rename with _$/mw suffix + for year_col in year_cols: + new_col = f"{year_col}_$/mw" + final_costs[new_col] = ( + 
final_costs[year_col] / final_costs["nominal_capacity_increase"] + ) + final_costs.drop(columns=year_col, inplace=True) + final_costs = final_costs.rename(columns=config["out_going_column_mappings"]) + return final_costs + + +def _clean_augmentation_table_column_names( + aug_table: pd.DataFrame, config: dict +) -> pd.DataFrame: + """ + Clean and rename columns in the augmentation table. + + Args: + aug_table: pd.DataFrame specifying the augmentation table. + config: dict with processing configuration containing: + - in_coming_column_mappings: dict mapping standard column names to type-specific names + + Returns: + pd.DataFrame containing the cleaned and renamed augmentation table. + """ + # Map specific columns to standardized names + # Reverse the in_coming_column_mappings dict to go from specific -> generic + aug_table = aug_table.rename(columns=config["in_coming_column_mappings"]) + cols_to_keep = list( + set( + [ + col + for col in config["in_coming_column_mappings"].values() + if col in aug_table.columns + ] + ) + ) + return aug_table.loc[:, cols_to_keep] + + +def _clean_augmentation_table_column_values( + aug_table: pd.DataFrame, config: dict +) -> pd.DataFrame: + """ + Prepare and typecast augmentation table columns for analysis. 
+ + Args: + aug_table: pd.DataFrame containing transmission-specific columns + config: dict with processing configuration containing: + - transmission_type: str specifying the type of transmission + - in_coming_column_mappings: dict mapping standard column names to + flow path/rez names + + Returns: + pd.DataFrame containing standardized columns: + - id + - option + - nominal_capacity_increase + """ + transmission_type = config["transmission_type"] + + # Handle flow path special case: calculate capacity as max of forward and reverse + if transmission_type == "flow_path": + aug_table["forward_capacity_increase"] = pd.to_numeric( + _strip_all_text_after_numeric_value(aug_table["forward_capacity_increase"]), + errors="coerce", + ) + aug_table["reverse_capacity_increase"] = pd.to_numeric( + _strip_all_text_after_numeric_value(aug_table["reverse_capacity_increase"]), + errors="coerce", + ) + aug_table["nominal_capacity_increase"] = aug_table[ + ["forward_capacity_increase", "reverse_capacity_increase"] + ].max(axis=1) + else: + aug_table["nominal_capacity_increase"] = pd.to_numeric( + _strip_all_text_after_numeric_value(aug_table["nominal_capacity_increase"]), + errors="coerce", + ) + return aug_table + + +def _get_cleaned_cost_tables( + iasr_tables: dict[str, pd.DataFrame], cost_table_names: list, config: dict +) -> pd.DataFrame: + """ + Retrieve, clean, concatenate, and filter all cost tables for a scenario and + transmission type. + + Args: + iasr_tables: dict[str, pd.DataFrame] specifying IASR tables. + cost_table_names: list of str specifying the names of cost tables to extract + and clean. + config: dict with processing configuration containing: + - in_coming_column_mappings: dict mapping standard column names to + flow path / rez names + + Returns: + pd.DataFrame containing the concatenated and filtered cost tables. Columns: + - id + - option + - (e.g., '2024_25', ...) 
+ """ + missing = [t for t in cost_table_names if t not in iasr_tables] + if missing: + logging.warning(f"Missing cost tables: {missing}") + cost_tables = [] + for table_name in cost_table_names: + if table_name not in iasr_tables: + continue + table = iasr_tables[table_name].copy() + table = table.rename(columns=config["in_coming_column_mappings"]) + cost_tables.append(table) + if not cost_tables: + raise ValueError("No cost tables found in iasr_tables.") + cost_table = pd.concat(cost_tables, ignore_index=True) + cost_table.columns = [_snakecase_string(col) for col in cost_table.columns] + forecast_year_cols = [ + col for col in cost_table.columns if re.match(r"^\d{4}_\d{2}$", col) + ] + cost_table[forecast_year_cols[0]] = pd.to_numeric( + cost_table[forecast_year_cols[0]], errors="coerce" + ) + cost_table = cost_table.dropna(subset=forecast_year_cols, how="all") + return cost_table + + +def _get_prep_activities_table( + iasr_tables: dict[str, pd.DataFrame], cost_scenario: str, config: dict +) -> pd.DataFrame: + """ + Process the preparatory activities table for a given transmission type. + + Args: + iasr_tables: dict[str, pd.DataFrame] specifying IASR tables. + cost_scenario: str specifying the internal scenario key. + config: dict with processing configuration containing: + - mappings: dict with mappings for preparatory activities and other data + + Returns: + pd.DataFrame containing the aggregated preparatory activities. Columns: + - id + - option + - (e.g., '2024_25', '2025_26', ...) 
+ """ + transmission_type = config["transmission_type"] + if transmission_type == "flow_path": + prep_activities_table_name = ( + f"flow_path_augmentation_costs_{cost_scenario}_preparatory_activities" + ) + else: + prep_activities_table_name = ( + f"rez_augmentation_costs_{cost_scenario}_preparatory_activities" + ) + + if prep_activities_table_name not in iasr_tables: + logging.warning( + f"Missing preparatory activities table: {prep_activities_table_name}" + ) + # Return empty DataFrame with expected columns + return pd.DataFrame(columns=["id", "option"]) + + prep_activities = iasr_tables[prep_activities_table_name].copy() + prep_activities = prep_activities.rename( + columns=config["in_coming_column_mappings"] + ) + prep_activities.columns = [ + _snakecase_string(col) for col in prep_activities.columns + ] + prep_activities = prep_activities.drop( + columns=[col for col in prep_activities.columns if "unnamed" in col] + ) + + if transmission_type == "flow_path": + # Flow path preparatory activities processing + # Validate 'flow_path' values + invalid_flow_paths = set(prep_activities["id"]) - set( + config["mappings"]["prep_activities_name_to_option"].keys() + ) + if invalid_flow_paths: + raise ValueError( + f"Missing mapping values for the flow paths provided: {sorted(invalid_flow_paths)}. " + f"Please ensure these are present in templater/mappings.py." + ) + prep_activities["option"] = prep_activities["id"].map( + config["mappings"]["prep_activities_name_to_option"] + ) + + # Validate 'option_name' values + invalid_option_names = set(prep_activities["option"]) - set( + config["mappings"]["option_to_id"].keys() + ) + if invalid_option_names: + raise ValueError( + f"Missing mapping values for the option names provided: {sorted(invalid_option_names)}. " + f"Please ensure these are present in templater/mappings.py." 
+ ) + prep_activities = prep_activities.groupby("option").sum().reset_index() + prep_activities["id"] = prep_activities["option"].map( + config["mappings"]["option_to_id"] + ) + + elif transmission_type == "rez": + # Validate REZ names/IDs + invalid_rez_names = set(prep_activities["rez"]) - set( + config["prep_activities_mapping"].keys() + ) + if invalid_rez_names: + raise ValueError( + f"Missing mapping values for the REZ names provided: {sorted(invalid_rez_names)}. " + f"Please ensure these are present in templater/mappings.py." + ) + + prep_activities["option"] = prep_activities["rez"].apply( + lambda x: config["prep_activities_mapping"][x][1] + ) + prep_activities["id"] = prep_activities["rez"].apply( + lambda x: config["prep_activities_mapping"][x][0] + ) + return _sort_cols(prep_activities, ["id", "option"]) + + +def _get_actionable_projects_table( + iasr_tables: dict[str, pd.DataFrame], cost_scenario: str, config: dict +) -> pd.DataFrame: + """ + Process the actionable ISP projects table for flow paths. + + Args: + iasr_tables: dict[str, pd.DataFrame] specifying IASR tables. Table must have + columns: + - id (flow_path) + - (e.g., '2024-25', ...) + cost_scenario: str specifying the internal scenario key. + config: dict with processing configuration containing: + - mappings: dict with mappings for actionable projects + + Returns: + pd.DataFrame containing the actionable projects table. Columns: + - id (flow_path) + - option (option_name) + - (e.g., '2024_25', '2025_26', ...) 
+ """ + transmission_type = config["transmission_type"] + + # REZ has no actionable projects, return empty DataFrame + if transmission_type == "rez": + return pd.DataFrame(columns=["id", "option"]) + + # Process flow path actionable projects + actionable_projects_table_name = ( + f"flow_path_augmentation_costs_{cost_scenario}_actionable_isp_projects" + ) + + if actionable_projects_table_name not in iasr_tables: + logging.warning( + f"Missing actionable ISP projects table: {actionable_projects_table_name}" + ) + # Return empty DataFrame with expected columns + return pd.DataFrame(columns=["id", "option"]) + + actionable_projects = iasr_tables[actionable_projects_table_name].copy() + actionable_projects = actionable_projects.rename( + columns=config["in_coming_column_mappings"] + ) + actionable_projects.columns = [ + _snakecase_string(col) for col in actionable_projects.columns + ] + actionable_projects = actionable_projects.drop( + columns=[col for col in actionable_projects.columns if "unnamed" in col] + ) + + # Validate 'flow_path' values + invalid_flow_paths = set(actionable_projects["id"]) - set( + config["mappings"]["actionable_name_to_option"].keys() + ) + if invalid_flow_paths: + raise ValueError( + f"Missing mapping values for the flow paths provided: {sorted(invalid_flow_paths)}. " + f"Please ensure these are present in {config['mappings']['actionable_name_to_option']}." + ) + actionable_projects["option"] = actionable_projects["id"].map( + config["mappings"]["actionable_name_to_option"] + ) + + # Validate 'option_name' values + invalid_option_names = set(actionable_projects["option"]) - set( + config["mappings"]["actionable_option_to_id"].keys() + ) + if invalid_option_names: + raise ValueError( + f"Missing mapping values for the option names provided: {sorted(invalid_option_names)}. " + f"Please ensure these are present in {config['mappings']['actionable_option_to_id']}." 
+ ) + actionable_projects["id"] = actionable_projects["option"].map( + config["mappings"]["actionable_option_to_id"] + ) + + return _sort_cols(actionable_projects, ["id", "option"]) + + +def _combine_cost_tables( + cost_table: pd.DataFrame, + prep_activities: pd.DataFrame, + actionable_projects: pd.DataFrame, +) -> pd.DataFrame: + """ + Combine the cost table, preparatory activities table, and actionable projects table + into a single DataFrame. + + Args: + cost_table: pd.DataFrame specifying the cost table. + prep_activities: pd.DataFrame specifying the preparatory activities table. + actionable_projects: pd.DataFrame specifying the actionable projects table. + + Returns: + pd.DataFrame containing the combined cost table. + """ + tables = [cost_table, prep_activities] + + # Only include actionable_projects if it's not empty + if not actionable_projects.empty: + tables.append(actionable_projects) + + return pd.concat(tables, ignore_index=True) + + +def _get_year_columns(cost_table: pd.DataFrame) -> list: + """ + Get the financial year columns from the cost table. + + Args: + cost_table: pd.DataFrame specifying the cost table. + + Returns: + list of str specifying the financial year columns. + """ + year_cols = [col for col in cost_table.columns if re.match(r"\d{4}_\d{2}", col)] + if not year_cols: + raise ValueError("No financial year columns found in cost table") + return year_cols + + +def _find_first_year_with_complete_costs( + cost_table: pd.DataFrame, year_cols: list +) -> pd.DataFrame: + """ + Find the first year with complete costs for each transmission. + + Args: + cost_table: pd.DataFrame specifying the cost table with columns: + - id (flow_path or rez_constraint_id) + - option (option_name or option) + - (e.g., '2024_25', ...) + year_cols: list of str specifying the financial year column names. 
+ + Returns: + pd.DataFrame containing columns: + - id (flow_path or rez_constraint_id) + - option (option_name or option) + - cost + - first_valid_year_col + """ + valid_cost_rows = [] + missing_full_year_transmissions = [] + for transmission, group in cost_table.groupby("id"): + found = False + # Iterate through years (sort years based of first int in year string) + for year in sorted(year_cols, key=lambda y: int(y.split("_")[0])): + costs = pd.to_numeric(group[year], errors="coerce") + if not costs.isna().any(): + for idx, row in group.iterrows(): + entry = row[["id", "option"]].to_dict() + entry["cost"] = costs.loc[idx] + entry["first_valid_year_col"] = year + valid_cost_rows.append(entry) + found = True + break + if not found: + missing_full_year_transmissions.append(transmission) + if missing_full_year_transmissions: + raise ValueError( + f"No year found with all non-NA costs for transmissions: {missing_full_year_transmissions}" + ) + return pd.DataFrame(valid_cost_rows) + + +def _log_unmatched_transmission_options( + aug_table: pd.DataFrame, valid_costs_df: pd.DataFrame, merged_df: pd.DataFrame +): + """ + Logs (id, option) pairs that were dropped from each side during the merge. + """ + left_keys = set(tuple(x) for x in aug_table[["id", "option"]].values) + right_keys = set(tuple(x) for x in valid_costs_df[["id", "option"]].values) + merged_keys = set(tuple(x) for x in merged_df[["id", "option"]].values) + + dropped_from_left = left_keys - merged_keys + dropped_from_right = right_keys - merged_keys + + if dropped_from_left: + logging.info( + f"Dropped options from augmentation table: {sorted(dropped_from_left)}" + ) + if dropped_from_right: + logging.info(f"Dropped options from cost table: {sorted(dropped_from_right)}") + + +def _sort_cols(table: pd.DataFrame, start_cols: list[str]) -> pd.DataFrame: + """ + Reorder a pd.DataFrame's column using the fixed order provided in start_cols and + then sorting the remaining columns alphabetically. 
+ """ + remaining_cols = list(set(table.columns) - set(start_cols)) + sorted_remaining_columns = sorted(remaining_cols) + return table.loc[:, start_cols + sorted_remaining_columns] diff --git a/src/ispypsa/templater/helpers.py b/src/ispypsa/templater/helpers.py index a622bf0..4a7eeb2 100644 --- a/src/ispypsa/templater/helpers.py +++ b/src/ispypsa/templater/helpers.py @@ -227,3 +227,18 @@ def _convert_financial_year_columns_to_float(df: pd.DataFrame) -> pd.DataFrame: for col in df.columns ] return pd.concat(cols, axis=1) + + +def _strip_all_text_after_numeric_value( + series: pd.Index | pd.Series, +) -> pd.Index | pd.Series: + """ + Removes all text after the first numeric value. + + Numeric value can contain commas and one period. + """ + if series.dtypes == "object": + series = series.astype(str).str.replace( + r"^([0-9\.\,+]+)\s+.*", r"\1", regex=True + ) + return series diff --git a/src/ispypsa/templater/manually_extracted_template_tables/6.0/transmission_expansion_costs.csv b/src/ispypsa/templater/manually_extracted_template_tables/6.0/transmission_expansion_costs.csv deleted file mode 100644 index 55ddae1..0000000 --- a/src/ispypsa/templater/manually_extracted_template_tables/6.0/transmission_expansion_costs.csv +++ /dev/null @@ -1,14 +0,0 @@ -flow_path_name,indicative_transmission_expansion_cost_$/mw -CQ-NQ,1.126363636 -CQ-GG,0.838709677 -SQ-CQ,0.513333333 -NNSW-SQ,1.702027027 -Terranora,NA -CNSW-NNSW,0.497666667 -CNSW-SNW,0.284 -SNSW-CNSW,0.502333333 -VIC-SNSW,2.00554939 -Heywood,0.454333333 -SESA-CSA,0.602333333 -Murraylink,NA -Basslink,3.646666667 diff --git a/src/ispypsa/templater/mappings.py b/src/ispypsa/templater/mappings.py index 3776069..4e19513 100644 --- a/src/ispypsa/templater/mappings.py +++ b/src/ispypsa/templater/mappings.py @@ -42,7 +42,7 @@ { "node_from": ["NNSW", "VIC", "TAS"], "node_to": ["SQ", "CSA", "VIC"], - "flow_path_name": ["Terranora", "Murraylink", "Basslink"], + "flow_path": ["Terranora", "Murraylink", "Basslink"], } ) @@ -318,3 
+318,180 @@ }, ], } + + +# Subregion flow paths +_SUBREGION_FLOW_PATHS = [ + "CQ-NQ", + "CQ-GG", + "SQ-CQ", + "NNSW-SQ", + "CNSW-NNSW", + "CNSW-SNW", + "SNSW-CNSW", + "VIC-SNSW", + "TAS-VIC", + "VIC-SESA", + "SESA-CSA", +] + +_FLOW_PATH_AGUMENTATION_TABLES = [ + "flow_path_augmentation_options_" + fp for fp in _SUBREGION_FLOW_PATHS +] + +_REZ_CONNECTION_AGUMENTATION_TABLES = [ + "rez_augmentation_options_" + region for region in list(_NEM_REGION_IDS) +] + +_FLOW_PATH_AUGMENTATION_COST_TABLES_PROGRESSIVE_CHANGE = [ + "flow_path_augmentation_costs_progressive_change_" + fp + for fp in _SUBREGION_FLOW_PATHS +] + +_FLOW_PATH_AUGMENTATION_COST_TABLES_STEP_CHANGE_AND_GREEN_ENERGY_EXPORTS = [ + "flow_path_augmentation_costs_step_change_and_green_energy_exports_" + fp + for fp in _SUBREGION_FLOW_PATHS +] + +_FLOW_PATH_AUGMENTATION_COST_TABLES = ( + _FLOW_PATH_AUGMENTATION_COST_TABLES_PROGRESSIVE_CHANGE + + _FLOW_PATH_AUGMENTATION_COST_TABLES_STEP_CHANGE_AND_GREEN_ENERGY_EXPORTS +) + +_REZ_AUGMENTATION_COST_TABLES_PROGRESSIVE_CHANGE = [ + "rez_augmentation_costs_progressive_change_" + region + for region in list(_NEM_REGION_IDS) +] + +_REZ_AUGMENTATION_COST_TABLES_STEP_CHANGE_AND_GREEN_ENERGY_EXPORTS = [ + "rez_augmentation_costs_step_change_and_green_energy_exports_" + region + for region in list(_NEM_REGION_IDS) +] + +_REZ_AUGMENTATION_COST_TABLES = ( + _REZ_AUGMENTATION_COST_TABLES_PROGRESSIVE_CHANGE + + _REZ_AUGMENTATION_COST_TABLES_STEP_CHANGE_AND_GREEN_ENERGY_EXPORTS +) + +_FLOW_PATH_AGUMENTATION_NAME_ADJUSTMENTS = { + "Notional transfer level increase (MW) Note: Same increase applies to all transfer limit conditions (Peak demand, Summer typical and Winter reference)_Forward direction": "transfer_increase_forward_direction_MW", + "Notional transfer level increase (MW) Note: Same increase applies to all transfer limit conditions (Peak demand, Summer typical and Winter reference)_Reverse direction": "transfer_increase_reverse_direction_MW", +} + 
+_PREPATORY_ACTIVITIES_TABLES = [ + "flow_path_augmentation_costs_step_change_and_green_energy_exports_preparatory_activities", + "flow_path_augmentation_costs_progressive_change_preparatory_activities", +] + +_REZ_CONNECTION_PREPATORY_ACTIVITIES_TABLES = [ + "rez_augmentation_costs_step_change_and_green_energy_exports_preparatory_activities", + "rez_augmentation_costs_progressive_change_preparatory_activities", +] + +_ACTIONABLE_ISP_PROJECTS_TABLES = [ + "flow_path_augmentation_costs_step_change_and_green_energy_exports_actionable_isp_projects", + "flow_path_augmentation_costs_progressive_change_actionable_isp_projects", +] + +_PREPATORY_ACTIVITIES_NAME_TO_OPTION_NAME = { + "500kV QNI Connect (NSW works)": "NNSW–SQ Option 5", + "500kV QNI Connect (QLD works)": "NNSW–SQ Option 5", + "330kV QNI single circuit (NSW works)": "NNSW–SQ Option 1", + "330kV QNI single circuit (QLD works)": "NNSW–SQ Option 1", + "330kV QNI double circuit (NSW works)": "NNSW–SQ Option 2", + "330kV QNI double circuit (QLD works)": "NNSW–SQ Option 2", + "CQ-GG": "CQ-GG Option 1", + "Sydney Southern Ring": "CNSW-SNW Option 2", +} + +_REZ_PREPATORY_ACTIVITIES_NAME_TO_REZ_AND_OPTION_NAME = { + "Darling Downs REZ Expansion(Stage 1)": ["SWQLD1", "Option 1"], + "South East SA REZ": ["S1-TBMO", "Option 1"], + "South West Victoria REZ Option 1": ["SWV1", "Option 1"], + "South West Victoria REZ Option 1A": ["SWV1", "Option 1A"], + "South West Victoria REZ Option 1B": ["SWV1", "Option 1B"], + "South West Victoria REZ Option 1C": ["SWV1", "Option 1C"], + "South West Victoria REZ Option 2A": ["SWV1", "Option 2A"], + "South West Victoria REZ Option 2B": ["SWV1", "Option 2B"], + "South West Victoria REZ Option 3A": ["SWV1", "Option 3A"], + "South West Victoria REZ Option 3B": ["SWV1", "Option 3B"], +} + +_PREPATORY_ACTIVITIES_OPTION_NAME_TO_FLOW_PATH = { + "NNSW–SQ Option 5": "NNSW-SQ", + "NNSW–SQ Option 1": "NNSW-SQ", + "NNSW–SQ Option 2": "NNSW-SQ", + "CNSW-SNW Option 2": "CNSW-SNW", + "CQ-GG Option 1": 
"CQ-GG", +} + +_ACTIONABLE_ISP_PROJECTS_NAME_TO_OPTION_NAME = { + "Humelink": "SNSW-CNSW Option 1 (HumeLink)", + "VNI West": "VIC-SNSW Option 1 - VNI West (Kerang)", + "Project Marinus Stage 1": "TAS-VIC Option 1 (Project Marinus Stage 1)", + "Project Marinus Stage 2": "TAS-VIC Option 2 (Project Marinus Stage 2)", +} + +_ACTIONABLE_ISP_PROJECTS_OPTION_NAME_TO_FLOW_PATH = { + "SNSW-CNSW Option 1 (HumeLink)": "SNSW-CNSW", + "VIC-SNSW Option 1 - VNI West (Kerang)": "VIC-SNSW", + "TAS-VIC Option 1 (Project Marinus Stage 1)": "TAS-VIC", + "TAS-VIC Option 2 (Project Marinus Stage 2)": "TAS-VIC", +} + +# Transmission cost processing configurations +_FLOW_PATH_CONFIG = { + "transmission_type": "flow_path", + "in_coming_column_mappings": { + "Flow path": "id", + "Flow Path": "id", + "Option Name": "option", + "Option": "option", + "Notional transfer level increase (MW) Note: Same increase applies to all transfer limit conditions (Peak demand, Summer typical and Winter reference)_Forward direction": "forward_capacity_increase", + "Notional transfer level increase (MW) Note: Same increase applies to all transfer limit conditions (Peak demand, Summer typical and Winter reference)_Reverse direction": "reverse_capacity_increase", + }, + "out_going_column_mappings": { + "id": "flow_path", + "nominal_capacity_increase": "additional_network_capacity_mw", + }, + "table_names": { + "augmentation": _FLOW_PATH_AGUMENTATION_TABLES, + "cost": { + "progressive_change": _FLOW_PATH_AUGMENTATION_COST_TABLES_PROGRESSIVE_CHANGE, + "step_change_and_green_energy_exports": _FLOW_PATH_AUGMENTATION_COST_TABLES_STEP_CHANGE_AND_GREEN_ENERGY_EXPORTS, + }, + "prep_activities": _PREPATORY_ACTIVITIES_TABLES, + "actionable_projects": _ACTIONABLE_ISP_PROJECTS_TABLES, + }, + "mappings": { + "prep_activities_name_to_option": _PREPATORY_ACTIVITIES_NAME_TO_OPTION_NAME, + "option_to_id": _PREPATORY_ACTIVITIES_OPTION_NAME_TO_FLOW_PATH, + "actionable_name_to_option": _ACTIONABLE_ISP_PROJECTS_NAME_TO_OPTION_NAME, 
+ "actionable_option_to_id": _ACTIONABLE_ISP_PROJECTS_OPTION_NAME_TO_FLOW_PATH, + }, +} + +_REZ_CONFIG = { + "transmission_type": "rez", + "in_coming_column_mappings": { + "REZ constraint ID": "id", + "REZ / Constraint ID": "id", + "Option": "option", + "REZ": "rez", + "REZ Name": "rez", + "Additional network capacity (MW)": "nominal_capacity_increase", + }, + "out_going_column_mappings": { + "id": "rez_constraint_id", + "nominal_capacity_increase": "additional_network_capacity_mw", + }, + "table_names": { + "augmentation": _REZ_CONNECTION_AGUMENTATION_TABLES, + "cost": { + "progressive_change": _REZ_AUGMENTATION_COST_TABLES_PROGRESSIVE_CHANGE, + "step_change_and_green_energy_exports": _REZ_AUGMENTATION_COST_TABLES_STEP_CHANGE_AND_GREEN_ENERGY_EXPORTS, + }, + "prep_activities": _REZ_CONNECTION_PREPATORY_ACTIVITIES_TABLES, + }, + "prep_activities_mapping": _REZ_PREPATORY_ACTIVITIES_NAME_TO_REZ_AND_OPTION_NAME, +} diff --git a/src/ispypsa/templater/nodes.py b/src/ispypsa/templater/nodes.py index f3a6b70..826d864 100644 --- a/src/ispypsa/templater/nodes.py +++ b/src/ispypsa/templater/nodes.py @@ -16,7 +16,8 @@ def _get_reference_node_locations(reference_nodes): # request and merge in substation coordinates for reference nodes - substation_coordinates = _request_transmission_substation_coordinates() + # substation_coordinates = _request_transmission_substation_coordinates() + # substation_coordinates = pd.DataFrame() if not substation_coordinates.empty: reference_node_col = process.extractOne( "reference_node", reference_nodes.columns diff --git a/src/ispypsa/templater/renewable_energy_zones.py b/src/ispypsa/templater/renewable_energy_zones.py index 179798d..f52a262 100644 --- a/src/ispypsa/templater/renewable_energy_zones.py +++ b/src/ispypsa/templater/renewable_energy_zones.py @@ -70,11 +70,13 @@ def _template_rez_build_limits( "rez_resource_limit_violation_penalty_factor_$m/mw": "rez_solar_resource_limit_violation_penalty_factor_$/mw", } ) + 
rez_build_limits["carrier"] = "AC" rez_build_limits = rez_build_limits.loc[ :, [ "rez_id", "isp_sub_region_id", + "carrier", "wind_generation_total_limits_mw_high", "wind_generation_total_limits_mw_medium", "wind_generation_total_limits_mw_offshore_floating", diff --git a/src/ispypsa/translator/create_pypsa_friendly_inputs.py b/src/ispypsa/translator/create_pypsa_friendly_inputs.py index 8c24c05..eff195d 100644 --- a/src/ispypsa/translator/create_pypsa_friendly_inputs.py +++ b/src/ispypsa/translator/create_pypsa_friendly_inputs.py @@ -15,9 +15,7 @@ create_pypsa_friendly_bus_demand_timeseries, ) from ispypsa.translator.custom_constraints import ( - _translate_custom_constraint_lhs, - _translate_custom_constraint_rhs, - _translate_custom_constraints_generators, + _translate_custom_constraints, ) from ispypsa.translator.generators import ( _create_unserved_energy_generators, @@ -25,13 +23,8 @@ create_pypsa_friendly_existing_generator_timeseries, ) from ispypsa.translator.lines import _translate_flow_paths_to_lines -from ispypsa.translator.mappings import ( - _CUSTOM_CONSTRAINT_EXPANSION_COSTS, - _CUSTOM_CONSTRAINT_LHS_TABLES, - _CUSTOM_CONSTRAINT_RHS_TABLES, -) from ispypsa.translator.renewable_energy_zones import ( - _translate_renewable_energy_zone_build_limits_to_flow_paths, + _translate_renewable_energy_zone_build_limits_to_lines, ) from ispypsa.translator.snapshots import ( _add_investment_periods, @@ -40,7 +33,7 @@ ) from ispypsa.translator.temporal_filters import _filter_snapshots -_BASE_TRANSLATOR_OUPUTS = [ +_BASE_TRANSLATOR_OUTPUTS = [ "snapshots", "investment_period_weights", "buses", @@ -205,24 +198,15 @@ def create_pypsa_friendly_inputs( if config.network.nodes.rezs == "discrete_nodes": buses.append(_translate_rezs_to_buses(ispypsa_tables["renewable_energy_zones"])) lines.append( - _translate_renewable_energy_zone_build_limits_to_flow_paths( + _translate_renewable_energy_zone_build_limits_to_lines( ispypsa_tables["renewable_energy_zones"], - 
config.network.rez_transmission_expansion, - config.wacc, - config.network.annuitisation_lifetime, - config.network.rez_to_sub_region_transmission_default_limit, + ispypsa_tables["rez_transmission_expansion_costs"], + config, ) ) if config.network.nodes.regional_granularity != "single_region": - lines.append( - _translate_flow_paths_to_lines( - ispypsa_tables["flow_paths"], - config.network.transmission_expansion, - config.wacc, - config.network.annuitisation_lifetime, - ) - ) + lines.append(_translate_flow_paths_to_lines(ispypsa_tables, config)) pypsa_inputs["buses"] = pd.concat(buses) @@ -231,29 +215,7 @@ def create_pypsa_friendly_inputs( else: pypsa_inputs["lines"] = pd.DataFrame() - custom_constraint_lhs_tables = [ - ispypsa_tables[table] for table in _CUSTOM_CONSTRAINT_LHS_TABLES - ] - pypsa_inputs["custom_constraints_lhs"] = _translate_custom_constraint_lhs( - custom_constraint_lhs_tables - ) - custom_constraint_rhs_tables = [ - ispypsa_tables[table] for table in _CUSTOM_CONSTRAINT_RHS_TABLES - ] - pypsa_inputs["custom_constraints_rhs"] = _translate_custom_constraint_rhs( - custom_constraint_rhs_tables - ) - custom_constraint_generators = [ - ispypsa_tables[table] for table in _CUSTOM_CONSTRAINT_EXPANSION_COSTS - ] - pypsa_inputs["custom_constraints_generators"] = ( - _translate_custom_constraints_generators( - custom_constraint_generators, - config.network.rez_transmission_expansion, - config.wacc, - config.network.annuitisation_lifetime, - ) - ) + pypsa_inputs.update(_translate_custom_constraints(config, ispypsa_tables)) return pypsa_inputs @@ -374,7 +336,7 @@ def create_pypsa_friendly_timeseries_inputs( def list_translator_output_files(output_path: Path | None = None) -> list[Path]: - files = _BASE_TRANSLATOR_OUPUTS + files = _BASE_TRANSLATOR_OUTPUTS if output_path is not None: files = [output_path / Path(file + ".csv") for file in files] return files diff --git a/src/ispypsa/translator/custom_constraints.py b/src/ispypsa/translator/custom_constraints.py 
index 4912498..5bed612 100644 --- a/src/ispypsa/translator/custom_constraints.py +++ b/src/ispypsa/translator/custom_constraints.py @@ -1,88 +1,205 @@ -from pathlib import Path - import pandas as pd -from ispypsa.translator.helpers import _annuitised_investment_costs +from ispypsa.config import ( + ModelConfig, +) +from ispypsa.translator.lines import _translate_time_varying_expansion_costs from ispypsa.translator.mappings import ( _CUSTOM_CONSTRAINT_ATTRIBUTES, - _CUSTOM_CONSTRAINT_EXPANSION_COSTS, - _CUSTOM_CONSTRAINT_LHS_TABLES, - _CUSTOM_CONSTRAINT_RHS_TABLES, _CUSTOM_CONSTRAINT_TERM_TYPE_TO_ATTRIBUTE_TYPE, _CUSTOM_CONSTRAINT_TERM_TYPE_TO_COMPONENT_TYPE, + _CUSTOM_GROUP_CONSTRAINTS, + _CUSTOM_TRANSMISSION_LIMIT_CONSTRAINTS, ) -def _combine_custom_constraints_tables(custom_constraint_tables: list[pd.DataFrame]): - """Combines a set of custom constraint data tables into a single data table, - renaming the columns so that they are consistent. +def _translate_custom_constraints( + config: ModelConfig, ispypsa_tables: dict[str, pd.DataFrame] +): + """Translate custom constraint tables into a PyPSA friendly format. Args: - custom_constraint_tables: list of pd.DataFrames specifying custom constraint - details - Returns: pd.DataFrame + config: `ispypsa.config.ModelConfig` object + ispypsa_tables: dictionary of dataframes providing the `ISPyPSA` input tables. + The relevant tables for this function are: + - rez_group_constraints_rhs + - rez_group_constraints_lhs + - rez_transmission_limit_constraints_lhs + - rez_transmission_limit_constraints_rhs + Not all of these tables need to be present but if one of the tables in + pair is present an error will be raised if the other is missing. + + Returns: dictionary of dataframes in the `PyPSA` friendly format, with the relevant + tables for custom constraint. 
""" - combined_data = [] - for table in custom_constraint_tables: - table = table.rename(columns=_CUSTOM_CONSTRAINT_ATTRIBUTES) - cols_to_keep = [ - col - for col in table.columns - if col in _CUSTOM_CONSTRAINT_ATTRIBUTES.values() + _check_custom_constraint_table_sets_are_complete(ispypsa_tables) + + pypsa_inputs = {} + + all_custom_constraint_tables = ( + _CUSTOM_GROUP_CONSTRAINTS + _CUSTOM_TRANSMISSION_LIMIT_CONSTRAINTS + ) + + present_custom_constraint_tables = [ + table for table in all_custom_constraint_tables if table in pypsa_inputs + ] + + if len(present_custom_constraint_tables) != 0: + custom_constraint_rhs_tables = [ + ispypsa_tables[table] + for table in all_custom_constraint_tables + if "_rhs" in table ] - table = table.loc[:, cols_to_keep] - combined_data.append(table) - combined_data = pd.concat(combined_data) - return combined_data + pypsa_inputs["custom_constraints_rhs"] = _translate_custom_constraint_rhs( + custom_constraint_rhs_tables + ) + + custom_constraint_lhs_tables = [ + ispypsa_tables[table] + for table in all_custom_constraint_tables + if "_lhs" in table + ] + + if config.network.rez_transmission_expansion: + pypsa_inputs["custom_constraints_generators"] = ( + _translate_custom_constraints_generators( + list(pypsa_inputs["custom_constraints_rhs"]["constraint_name"]), + ispypsa_tables["rez_transmission_expansion_costs"], + config.wacc, + config.network.annuitisation_lifetime, + config.temporal.capacity_expansion.investment_periods, + config.temporal.year_type, + ) + ) + + custom_constraint_generators_lhs = ( + _translate_custom_constraint_generators_to_lhs( + pypsa_inputs["custom_constraints_generators"] + ) + ) + + custom_constraint_lhs_tables += [custom_constraint_generators_lhs] + + pypsa_inputs["custom_constraints_lhs"] = _translate_custom_constraint_lhs( + custom_constraint_lhs_tables + ) + + return pypsa_inputs + + +def _check_custom_constraint_table_sets_are_complete( + ispypsa_tables: dict[str, pd.DataFrame], +): + """Raise an 
error if a partially complete set of input tables has been provided + for a set of custom constraints. + """ + + def check_for_partially_complete_inputs(input_table_list, input_set_name): + tables_present = sum( + table in ispypsa_tables.keys() for table in input_table_list + ) + if tables_present != len(input_table_list) and tables_present > 0: + raise ValueError( + f"An incomplete set of inputs have been provided for {input_set_name}" + ) + + check_for_partially_complete_inputs( + _CUSTOM_GROUP_CONSTRAINTS, "custom group constraints" + ) + + check_for_partially_complete_inputs( + _CUSTOM_TRANSMISSION_LIMIT_CONSTRAINTS, "custom transmission limit constraints" + ) def _translate_custom_constraints_generators( - custom_constraint_generators: list[pd.DataFrame], - expansion_on: bool, + custom_constraints: list[int], + rez_expansion_costs: pd.DataFrame, wacc: float, asset_lifetime: int, + investment_periods: list[int], + year_type: str, ) -> pd.DataFrame: - """Combines all tables specifying the expansion costs of custom constraint - rhs values into a single pd.Dataframe formatting the data so the rhs - can be represented by PyPSA generator components. PyPSA can then invest in - additional capacity for the generators which is used in the custom constraints - to represent additional transmission capacity. + """Translates REZ network expansion data into custom generators for modelling + rez constraint relaxation. Args: - custom_constraint_generators: list of pd.DataFrames in `ISPyPSA` detailing - custom constraint generator expansion costs. - expansion_on: bool indicating if transmission line expansion is considered. - wacc: float, as fraction, indicating the weighted average coast of capital for - transmission line investment, for the purposes of annuitising capital - costs. - asset_lifetime: int specifying the nominal asset lifetime in years or the - purposes of annuitising capital costs. 
+ custom_constraints: list of custom constraints to create expansion generators + for. + rez_expansion_costs: pd.DataFrame with time-varying expansion costs. + wacc: float indicating the weighted average coast of capital. + asset_lifetime: int specifying the nominal asset lifetime in years. + investment_periods: list of investment years for time-varying costs. + year_type: temporal configuration ("fy" or "calendar") for time-varying costs. Returns: pd.DataFrame """ - custom_constraint_generators = _combine_custom_constraints_tables( - custom_constraint_generators + rez_expansion_costs = rez_expansion_costs[ + rez_expansion_costs["rez_constraint_id"].isin(custom_constraints) + ] + + expansion_generators = _translate_time_varying_expansion_costs( + expansion_costs=rez_expansion_costs, + cost_column_suffix="_$/mw", + investment_periods=investment_periods, + year_type=year_type, + wacc=wacc, + asset_lifetime=asset_lifetime, ) - custom_constraint_generators = custom_constraint_generators.rename( - columns={"variable_name": "name"} + expansion_generators = expansion_generators.rename( + columns={ + "rez_constraint_id": "constraint_name", + "investment_year": "build_year", + } ) - custom_constraint_generators["bus"] = "bus_for_custom_constraint_gens" - custom_constraint_generators["p_nom"] = 0.0 + expansion_generators["name"] = ( + expansion_generators["constraint_name"] + + "_exp_" + + expansion_generators["build_year"].astype(str) + ) + expansion_generators["bus"] = "bus_for_custom_constraint_gens" + expansion_generators["p_nom"] = 0.0 + expansion_generators["p_nom_extendable"] = True + expansion_generators["lifetime"] = asset_lifetime - # The generator size is only used for additional transmission capacity, so it - # initial size is 0.0. 
- custom_constraint_generators["capital_cost"] = custom_constraint_generators[ - "capital_cost" - ].apply(lambda x: _annuitised_investment_costs(x, wacc, asset_lifetime)) + # Keep only the columns needed for PyPSA generators + expansion_cols = [ + "name", + "constraint_name", + "bus", + "p_nom", + "p_nom_extendable", + "build_year", + "lifetime", + "capital_cost", + ] + expansion_generators = expansion_generators[expansion_cols] + return expansion_generators.reset_index(drop=True) - # not extendable by default - custom_constraint_generators["p_nom_extendable"] = False - mask = ~custom_constraint_generators["capital_cost"].isna() - custom_constraint_generators.loc[mask, "p_nom_extendable"] = expansion_on - return custom_constraint_generators +def _combine_custom_constraints_tables(custom_constraint_tables: list[pd.DataFrame]): + """Combines a set of custom constraint data tables into a single data table, + renaming the columns so that they are consistent. + + Args: + custom_constraint_tables: list of pd.DataFrames specifying custom constraint + details + Returns: pd.DataFrame + """ + combined_data = [] + for table in custom_constraint_tables: + table = table.rename(columns=_CUSTOM_CONSTRAINT_ATTRIBUTES) + cols_to_keep = [ + col + for col in table.columns + if col in _CUSTOM_CONSTRAINT_ATTRIBUTES.values() + ] + table = table.loc[:, cols_to_keep] + combined_data.append(table) + combined_data = pd.concat(combined_data) + return combined_data def _translate_custom_constraint_rhs( @@ -131,3 +248,24 @@ def _translate_custom_constraint_lhs( columns="term_type" ) return custom_constraint_lhs_values + + +def _translate_custom_constraint_generators_to_lhs( + custom_constraint_generators: pd.DataFrame, +) -> pd.DataFrame: + """Create the lhs definitions to match the generators used to relax custom + constraints + + Args: + custom_constraint_generators: pd.DataFrames detailing the + custom constraint generators + + Returns: pd.DataFrame + """ + custom_constraint_generators = 
custom_constraint_generators.rename( + columns={"constraint_name": "constraint_id", "name": "term_id"} + ) + custom_constraint_generators["term_type"] = "generator_capacity" + custom_constraint_generators["coefficient"] = -1.0 + col_order = ["constraint_id", "term_type", "term_id", "coefficient"] + return custom_constraint_generators.loc[:, col_order] diff --git a/src/ispypsa/translator/lines.py b/src/ispypsa/translator/lines.py index 48d6444..ec74722 100644 --- a/src/ispypsa/translator/lines.py +++ b/src/ispypsa/translator/lines.py @@ -1,42 +1,236 @@ -from pathlib import Path - +import numpy as np import pandas as pd +from ispypsa.config import ModelConfig from ispypsa.translator.helpers import _annuitised_investment_costs from ispypsa.translator.mappings import _LINE_ATTRIBUTES def _translate_flow_paths_to_lines( - flow_paths: pd.DataFrame, - expansion_on: bool, + ispypsa_tables: dict[str, pd.DataFrame], + config: ModelConfig, +) -> pd.DataFrame: + """Process network line data into the PyPSA friendly format. + + Args: + ispypsa_tables: Dictionary of ISPyPSA DataFrames, expecting "flow_paths" + and "flow_path_expansion_costs". + config: Configuration object with temporal, WACC, and network lifetime settings. + + Returns: + pd.DataFrame: PyPSA style line attributes in tabular format, including both + existing lines and potential expansion lines. 
+ """ + existing_flow_paths_df = ispypsa_tables["flow_paths"] + existing_lines = _translate_existing_flow_path_capacity_to_lines( + existing_flow_paths_df + ) + + if config.network.transmission_expansion: + expansion_lines = _translate_expansion_costs_to_lines( + ispypsa_tables["flow_path_expansion_costs"], + existing_lines.copy(), + config.temporal.capacity_expansion.investment_periods, + config.temporal.year_type, + config.wacc, + config.network.annuitisation_lifetime, + ) + else: + expansion_lines = pd.DataFrame() + + all_lines = pd.concat( + [existing_lines, expansion_lines], ignore_index=True, sort=False + ) + + return all_lines + + +def _translate_existing_flow_path_capacity_to_lines( + existing_flow_paths: pd.DataFrame, +) -> pd.DataFrame: + """Translates existing flow path capacities to PyPSA line components. + + Args: + existing_flow_paths: DataFrame from ispypsa_tables["flow_paths"]. + + Returns: + `pd.DataFrame`: PyPSA style line attributes in tabular format. + """ + lines_df = existing_flow_paths.loc[:, list(_LINE_ATTRIBUTES.keys())].copy() + lines_df = lines_df.rename(columns=_LINE_ATTRIBUTES) + + lines_df["name"] = lines_df["name"] + "_existing" + + lines_df["s_nom_extendable"] = False + lines_df["capital_cost"] = np.nan + + return lines_df + + +def _translate_expansion_costs_to_lines( + expansion_costs: pd.DataFrame, + existing_lines_df: pd.DataFrame, + investment_periods: list[int], + year_type: str, wacc: float, asset_lifetime: int, + id_column: str = "flow_path", + match_column: str = "name", ) -> pd.DataFrame: - """Process network line data into a format aligned with PyPSA inputs. + """Translates expansion costs to PyPSA line components. + + This function uses the generic _translate_time_varying_expansion_costs function + to process the expansion costs, then creates appropriate line components. Args: - flow_paths: `ISPyPSA` formatted pd.DataFrame detailing flow path capabilities - between regions or sub regions depending on granularity. 
- expansion_on: bool indicating if transmission line expansion is considered. - wacc: float, as fraction, indicating the weighted average coast of capital for - transmission line investment, for the purposes of annuitising capital - costs. - asset_lifetime: int specifying the nominal asset lifetime in years or the - purposes of annuitising capital costs. + expansion_costs: `ISPyPSA` formatted pd.DataFrame detailing + the expansion costs with financial year columns. + existing_lines_df: `PyPSA` style line attributes in tabular format. + Used to source bus/carrier data. + investment_periods: List of investment years (e.g., [2025, 2030]). + year_type: Temporal configuration, e.g., "fy" or "calendar". + wacc: Weighted average cost of capital. + asset_lifetime: Nominal asset lifetime in years. + id_column: Column name in expansion_costs containing the identifier. + match_column: Column name in existing_lines_df to match with id_column. Returns: - `pd.DataFrame`: PyPSA style generator attributes in tabular format. + `pd.DataFrame`: PyPSA style line attributes in tabular format. """ - lines = flow_paths.loc[:, _LINE_ATTRIBUTES.keys()] - lines = lines.rename(columns=_LINE_ATTRIBUTES) + # Use the generic function to process costs + processed_costs = _translate_time_varying_expansion_costs( + expansion_costs=expansion_costs, + cost_column_suffix="_$/mw", + investment_periods=investment_periods, + year_type=year_type, + wacc=wacc, + asset_lifetime=asset_lifetime, + ) - lines["capital_cost"] = lines["capital_cost"].apply( - lambda x: _annuitised_investment_costs(x, wacc, asset_lifetime) + if processed_costs.empty: + return pd.DataFrame() + + # Prepare for merging with existing lines data + pypsa_attributes_to_carry = ["bus0", "bus1", "carrier"] + + # remove "_existing" suffix from line names so we can match on the eixtsing line + # data. 
+ existing_lines_copy = existing_lines_df.copy() + existing_lines_copy[match_column] = existing_lines_copy[match_column].str.replace( + "_existing", "" + ) + + # Merge with existing lines to get attributes like bus0, bus1, carrier + df_merged = pd.merge( + processed_costs, + existing_lines_copy[[match_column] + pypsa_attributes_to_carry], + left_on=id_column, + right_on=match_column, + ) + + # Directly modify df_merged to create the expansion lines + df_merged["name"] = ( + df_merged["bus0"] + + "-" + + df_merged["bus1"] + + "_exp_" + + df_merged["investment_year"].astype(str) + ) + df_merged["s_nom"] = 0.0 + df_merged["s_nom_extendable"] = True + df_merged["build_year"] = df_merged["investment_year"] + df_merged["lifetime"] = asset_lifetime + + # Keep only the columns needed for PyPSA lines + expansion_cols = [ + "name", + "bus0", + "bus1", + "carrier", + "s_nom", + "s_nom_extendable", + "build_year", + "lifetime", + "capital_cost", + ] + expansion_lines = df_merged[expansion_cols] + + return expansion_lines + + +def _translate_time_varying_expansion_costs( + expansion_costs: pd.DataFrame, + cost_column_suffix: str, + investment_periods: list[int], + year_type: str, + wacc: float, + asset_lifetime: int, +) -> pd.DataFrame: + """Process time-varying expansion costs for flow paths and rezs. + + Converts from years as columns to years as rows, extracts model year from column + name, and annuitises expansion costs. + + Args: + expansion_costs: DataFrame containing expansion cost data with time-varying costs. + id_column: Name of the column that contains the component identifier. + cost_column_suffix: Suffix for cost columns (e.g. "_$/mw"). + investment_periods: List of investment years (e.g., [2025, 2030]). + year_type: Temporal configuration, e.g., "fy" or "calendar". + wacc: Weighted average cost of capital. + asset_lifetime: Nominal asset lifetime in years. + + Returns: + pd.DataFrame: Processed expansion costs with parsed years and annuitized costs. 
+ """ + if expansion_costs.empty: + return pd.DataFrame() + + # Extract cost columns (those ending with the specified suffix) + cost_cols = [ + col for col in expansion_costs.columns if col.endswith(cost_column_suffix) + ] + id_vars = [col for col in expansion_costs.columns if col not in cost_cols] + + # Melt the dataframe to convert from wide to long format + df_melted = expansion_costs.melt( + id_vars=id_vars, + value_vars=cost_cols, + var_name="cost_year_raw_with_suffix", + value_name="cost_per_unit", ) - # not extendable by default - lines["s_nom_extendable"] = False - # If a non-nan capital_cost is given then set to extendable - lines.loc[~lines["capital_cost"].isna(), "s_nom_extendable"] = expansion_on + # Drop rows with NaN costs + df_melted = df_melted.dropna(subset=["cost_per_unit"]) + if df_melted.empty: + return pd.DataFrame() + + # Parse financial year from cost column names + def parse_cost_year(cost_year_raw: str) -> int: + year_part = cost_year_raw.split(cost_column_suffix)[0] # e.g., "2025_26" + if year_type == "fy": + # For financial year format like "2025_26" + yy_part = year_part.split("_")[1] # e.g., "26" + return 2000 + int(yy_part) # e.g., 2026, as per spec + elif year_type == "calendar": + raise NotImplementedError( + f"Calendar years not implemented for transmission costs" + ) + else: + raise ValueError(f"Unknown year_type: {year_type}") + + df_melted["investment_year"] = df_melted["cost_year_raw_with_suffix"].apply( + parse_cost_year + ) + + # Filter to only include costs relevant to our investment periods + df_melted = df_melted[df_melted["investment_year"].isin(investment_periods)] + if df_melted.empty: + return pd.DataFrame() + + # Annuitize the costs + df_melted["capital_cost"] = df_melted["cost_per_unit"].apply( + lambda x: _annuitised_investment_costs(x, wacc, asset_lifetime) + ) - return lines + return df_melted diff --git a/src/ispypsa/translator/mappings.py b/src/ispypsa/translator/mappings.py index 6ec8778..41ff4de 100644 --- 
a/src/ispypsa/translator/mappings.py +++ b/src/ispypsa/translator/mappings.py @@ -7,11 +7,11 @@ _BUS_ATTRIBUTES = {"isp_sub_region_id": "name"} _LINE_ATTRIBUTES = { - "flow_path_name": "name", + "flow_path": "name", + "carrier": "carrier", "node_from": "bus0", "node_to": "bus1", "forward_direction_mw_summer_typical": "s_nom", - "indicative_transmission_expansion_cost_$/mw": "capital_cost", # TODO: implement reverse direction limit # "reverse_direction_mw_summer_typical": "" } @@ -19,6 +19,7 @@ _REZ_LINE_ATTRIBUTES = { "rez_id": "bus0", "isp_sub_region_id": "bus1", + "carrier": "carrier", "rez_transmission_network_limit_summer_typical": "s_nom", "indicative_transmission_expansion_cost_$/mw": "capital_cost", } @@ -32,19 +33,14 @@ "coefficient": "coefficient", } -_CUSTOM_CONSTRAINT_EXPANSION_COSTS = [ - "rez_group_constraints_expansion_costs", - "rez_transmission_limit_constraints_expansion_costs", -] - -_CUSTOM_CONSTRAINT_RHS_TABLES = [ +_CUSTOM_GROUP_CONSTRAINTS = [ "rez_group_constraints_rhs", - "rez_transmission_limit_constraints_rhs", + "rez_group_constraints_lhs", ] -_CUSTOM_CONSTRAINT_LHS_TABLES = [ - "rez_group_constraints_lhs", +_CUSTOM_TRANSMISSION_LIMIT_CONSTRAINTS = [ "rez_transmission_limit_constraints_lhs", + "rez_transmission_limit_constraints_rhs", ] _CUSTOM_CONSTRAINT_TERM_TYPE_TO_COMPONENT_TYPE = { diff --git a/src/ispypsa/translator/renewable_energy_zones.py b/src/ispypsa/translator/renewable_energy_zones.py index 02f4d5c..9453da1 100644 --- a/src/ispypsa/translator/renewable_energy_zones.py +++ b/src/ispypsa/translator/renewable_energy_zones.py @@ -1,29 +1,65 @@ -from pathlib import Path - import pandas as pd -from ispypsa.translator.helpers import _annuitised_investment_costs +from ispypsa.config import ModelConfig +from ispypsa.translator.lines import _translate_expansion_costs_to_lines from ispypsa.translator.mappings import _REZ_LINE_ATTRIBUTES -def _translate_renewable_energy_zone_build_limits_to_flow_paths( +def 
_translate_renewable_energy_zone_build_limits_to_lines( renewable_energy_zone_build_limits: pd.DataFrame, - expansion_on: bool, - wacc: float, - asset_lifetime: int, - rez_to_sub_region_transmission_default_limit: float, + rez_expansion_costs: pd.DataFrame, + config: ModelConfig, ) -> pd.DataFrame: """Process renewable energy zone build limit data to format aligned with PyPSA - inputs. + inputs, incorporating time-varying expansion costs. + + Args: + renewable_energy_zone_build_limits: `ISPyPSA` formatted pd.DataFrame detailing + Renewable Energy Zone transmission limits. + rez_expansion_costs: `ISPyPSA` formatted pd.DataFrame detailing Renewable Energy + Zone expansion costs by year. + config: ModelConfig object containing wacc, investment periods, etc. + + Returns: + `pd.DataFrame`: PyPSA style line attributes in tabular format. + """ + # Create existing lines from renewable energy zone build limits + existing_lines = _translate_existing_rez_connections_to_lines( + renewable_energy_zone_build_limits, + config.network.rez_to_sub_region_transmission_default_limit, + ) + + # Create expansion lines from rez expansion costs if expansion is enabled + if config.network.rez_transmission_expansion and not rez_expansion_costs.empty: + expansion_lines = _translate_expansion_costs_to_lines( + expansion_costs=rez_expansion_costs, + existing_lines_df=existing_lines.copy(), + investment_periods=config.temporal.capacity_expansion.investment_periods, + year_type=config.temporal.year_type, + wacc=config.wacc, + asset_lifetime=config.network.annuitisation_lifetime, + id_column="rez_constraint_id", + match_column="name", + ) + # Combine existing and expansion lines + all_lines = pd.concat( + [existing_lines, expansion_lines], ignore_index=True, sort=False + ) + else: + all_lines = existing_lines + + return all_lines + + +def _translate_existing_rez_connections_to_lines( + renewable_energy_zone_build_limits: pd.DataFrame, + rez_to_sub_region_transmission_default_limit: float, +) -> 
pd.DataFrame: + """Process existing REZ connection limits to PyPSA lines. Args: renewable_energy_zone_build_limits: `ISPyPSA` formatted pd.DataFrame detailing Renewable Energy Zone transmission limits. - wacc: float, as fraction, indicating the weighted average cost of capital for - transmission line investment, for the purposes of annuitising capital - costs. - asset_lifetime: int specifying the nominal asset lifetime in years or the - purposes of annuitising capital costs. rez_to_sub_region_transmission_default_limit: float specifying the transmission limit to use for rez to subregion connections when an explicit limit is not given in the inputs. @@ -33,20 +69,13 @@ def _translate_renewable_energy_zone_build_limits_to_flow_paths( """ lines = renewable_energy_zone_build_limits.loc[:, _REZ_LINE_ATTRIBUTES.keys()] lines = lines.rename(columns=_REZ_LINE_ATTRIBUTES) - lines["name"] = lines["bus0"] + "-" + lines["bus1"] + lines["name"] = lines["bus0"] + "-" + lines["bus1"] + "_existing" # Lines without an explicit limit because their limits are modelled through - # custom constraints are given a very large capacity because using inf causes - # infeasibility + # custom constraints are given a very large capacity lines["s_nom"] = lines["s_nom"].fillna(rez_to_sub_region_transmission_default_limit) - lines["capital_cost"] = lines["capital_cost"].apply( - lambda x: _annuitised_investment_costs(x, wacc, asset_lifetime) - ) - - # not extendable by default + # Not extendable for existing lines lines["s_nom_extendable"] = False - # If a non-nan capital_cost is given then set to extendable - lines.loc[~lines["capital_cost"].isna(), "s_nom_extendable"] = expansion_on return lines diff --git a/tests/conftest.py b/tests/conftest.py index 1ff7370..7525068 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,8 +1,21 @@ +import io from pathlib import Path +import pandas as pd import pytest @pytest.fixture(scope="module") def workbook_table_cache_test_path(): return 
Path("tests", "test_workbook_table_cache") + + +@pytest.fixture +def csv_str_to_df(): + def func(csv_str, **kwargs): + """Helper function to convert a CSV string to a DataFrame.""" + # Remove spaces and tabs that have been included for readability. + csv_str = csv_str.replace(" ", "").replace("\t", "").replace("__", " ") + return pd.read_csv(io.StringIO(csv_str), **kwargs) + + return func diff --git a/tests/test_templater/test_flow_path_costs.py b/tests/test_templater/test_flow_path_costs.py new file mode 100644 index 0000000..50e2c9d --- /dev/null +++ b/tests/test_templater/test_flow_path_costs.py @@ -0,0 +1,337 @@ +import numpy as np +import pandas as pd +import pytest + +from ispypsa.templater.flow_paths import ( + _get_augmentation_table, + _get_cost_table, + _get_least_cost_options, + _template_sub_regional_flow_path_costs, + process_transmission_costs, +) +from ispypsa.templater.mappings import ( + _FLOW_PATH_AGUMENTATION_TABLES, + _FLOW_PATH_CONFIG, +) + + +def test_template_sub_regional_flow_path_costs_simple_least_cost_option(): + # Augmentation tables for NNSW-SQ and TAS-VIC + aug_table_nnsw_sq = pd.DataFrame( + { + "Flow path": ["NNSW-SQ", "NNSW-SQ", "NNSW-SQ"], + "Option Name": ["NNSW-SQ Option 1", "NNSW-SQ Option 2", "NNSW–SQ Option 5"], + "forward_capacity_increase": [100, 200, 40], + "reverse_capacity_increase": [90, 140, 50], + } + ) + aug_table_tas_vic = pd.DataFrame( + { + "Flow path": ["TAS-VIC", "TAS-VIC"], + "Option Name": [ + "TAS-VIC Option 1 (Project Marinus Stage 1)", + "TAS-VIC Option 2 (Project Marinus Stage 2)", + ], + "forward_capacity_increase": [130, 70], + "reverse_capacity_increase": [150, 65], + } + ) + # Cost tables for NNSW-SQ and TAS-VIC + # Option 2 is least cost and has the largest increase so should be chosen. 
+ cost_table_nnsw_sq = pd.DataFrame( + { + "Flow path": ["NNSW-SQ", "NNSW-SQ"], + "Option Name": ["NNSW-SQ Option 1", "NNSW-SQ Option 2"], + "2024_25": [50, 40], + "2025_26": [55, 45], + } + ) + # Option 1 is least cost and has the largest increase so should be chosen. + cost_table_tas_vic = pd.DataFrame( + { + "Flow path": ["TAS-VIC", "TAS-VIC"], + "Option Name": [ + "TAS-VIC Option 1 (Project Marinus Stage 1)", + "TAS-VIC Option 2 (Project Marinus Stage 2)", + ], + "2024_25": [70, np.nan], # actionable ISP option has NaN + "2025_26": [75, np.nan], + } + ) + # Preparatory activities and actionable ISP tables (should not be chosen) + # Note: ISPyPSA contains internal mappings which match the names used in Preparatory + # and actionable isp cost tables to the names used in the augmentation tables. + prep_acts = pd.DataFrame( + { + "Flow path": ["500kV QNI Connect (NSW works)"], + "2024_25": [100], + "2025_26": [110], + } + ) + actionable_isp = pd.DataFrame( + { + "Flow path": ["Project Marinus Stage 1"], + "2024_25": [999], + "2025_26": [999], + } + ) + # Compose iasr_tables dict + iasr_tables = { + "flow_path_augmentation_options_NNSW-SQ": aug_table_nnsw_sq, + "flow_path_augmentation_options_TAS-VIC": aug_table_tas_vic, + "flow_path_augmentation_costs_progressive_change_NNSW-SQ": cost_table_nnsw_sq, + "flow_path_augmentation_costs_progressive_change_TAS-VIC": cost_table_tas_vic, + "flow_path_augmentation_costs_progressive_change_preparatory_activities": prep_acts, + "flow_path_augmentation_costs_progressive_change_actionable_isp_projects": actionable_isp, + } + scenario = "Progressive Change" + # Run function + result = _template_sub_regional_flow_path_costs(iasr_tables, scenario) + # Check least cost options are chosen for NNSW-SQ and TAS-VIC + nnsw_sq_row = result[result["flow_path"] == "NNSW-SQ"] + tas_vic_row = result[result["flow_path"] == "TAS-VIC"] + assert nnsw_sq_row["option"].iloc[0] == "NNSW-SQ Option 2" + assert tas_vic_row["option"].iloc[0] == "TAS-VIC 
Option 1 (Project Marinus Stage 1)" + # Check nominal_flow_limit_increase_mw is correct + assert nnsw_sq_row["additional_network_capacity_mw"].iloc[0] == 200 + assert tas_vic_row["additional_network_capacity_mw"].iloc[0] == 150 + # Check cost per year column is correct (cost divided by nominal limit) + # For NNSW-SQ Option 2: 2024_25 = 40/200 = 0.2, 2025_26 = 45/200 = 0.225 + # For TAS-VIC Option 1: 2024_25 = 70/150 ≈ 0.4667, 2025_26 = 75/150 = 0.5 + assert abs(nnsw_sq_row["2024_25_$/mw"].iloc[0] - 0.2) < 1e-6 + assert abs(nnsw_sq_row["2025_26_$/mw"].iloc[0] - 0.225) < 1e-6 + assert abs(tas_vic_row["2024_25_$/mw"].iloc[0] - (70 / 150)) < 1e-6 + assert abs(tas_vic_row["2025_26_$/mw"].iloc[0] - 0.5) < 1e-6 + + +def test_template_sub_regional_flow_path_costs_prep_and_actionable_chosen(): + """ + The cost of the non preparatory activities and non actionable isp projects + have been made very high and therefore preparatory activities and + actionable isp projects should be chosen. + """ + # Augmentation tables for NNSW-SQ and TAS-VIC + aug_table_nnsw_sq = pd.DataFrame( + { + "Flow path": ["NNSW-SQ", "NNSW-SQ", "NNSW-SQ"], + "Option Name": ["NNSW-SQ Option 1", "NNSW-SQ Option 2", "NNSW–SQ Option 5"], + "forward_capacity_increase": [100, 150, 200], + "reverse_capacity_increase": [100, 150, 150], + } + ) + aug_table_tas_vic = pd.DataFrame( + { + "Flow path": ["TAS-VIC", "TAS-VIC"], + "Option Name": [ + "TAS-VIC Option 1 (Project Marinus Stage 1)", + "TAS-VIC Option 2 (Project Marinus Stage 2)", + ], + "forward_capacity_increase": [140, 150], + "reverse_capacity_increase": [145, 130], + } + ) + # Standard cost tables (set high or NaN) + cost_table_nnsw_sq = pd.DataFrame( + { + "Flow path": ["NNSW-SQ", "NNSW-SQ"], + "Option Name": ["NNSW-SQ Option 1", "NNSW-SQ Option 2"], + "2024_25": [1000, 1000], + "2025_26": [1000, 1000], + } + ) + cost_table_tas_vic = pd.DataFrame( + { + "Flow path": ["TAS-VIC", "TAS-VIC"], + "Option Name": [ + "TAS-VIC Option 1 (Project Marinus Stage 
1)", + "TAS-VIC Option 2 (Project Marinus Stage 2)", + ], + "2024_25": [1000, np.nan], + "2025_26": [1000, np.nan], + } + ) + # Preparatory activities and actionable ISP tables (set low cost) + # Note: ISPyPSA contains internal mappings which match the names used in Preparatory + # and actionable isp cost tables to the names used in the augmentation tables. + prep_acts = pd.DataFrame( + { + "Flow path": ["500kV QNI Connect (NSW works)"], + "2024-25": [10], + "2025-26": [20], + } + ) + actionable_isp = pd.DataFrame( + { + "Flow path": ["Project Marinus Stage 2"], + "2024-25": [15], + "2025-26": [25], + } + ) + # Compose iasr_tables dict + iasr_tables = { + "flow_path_augmentation_options_NNSW-SQ": aug_table_nnsw_sq, + "flow_path_augmentation_options_TAS-VIC": aug_table_tas_vic, + "flow_path_augmentation_costs_progressive_change_NNSW-SQ": cost_table_nnsw_sq, + "flow_path_augmentation_costs_progressive_change_TAS-VIC": cost_table_tas_vic, + "flow_path_augmentation_costs_progressive_change_preparatory_activities": prep_acts, + "flow_path_augmentation_costs_progressive_change_actionable_isp_projects": actionable_isp, + } + scenario = "Progressive Change" + # Run function + result = _template_sub_regional_flow_path_costs(iasr_tables, scenario) + # Check that the prep activity is chosen for NNSW-SQ and actionable ISP for TAS-VIC + nnsw_sq_row = result[result["flow_path"] == "NNSW-SQ"] + tas_vic_row = result[result["flow_path"] == "TAS-VIC"] + assert nnsw_sq_row["option"].iloc[0] == "NNSW–SQ Option 5" + assert tas_vic_row["option"].iloc[0] == "TAS-VIC Option 2 (Project Marinus Stage 2)" + # Check nominal_flow_limit_increase_mw is correct + assert nnsw_sq_row["additional_network_capacity_mw"].iloc[0] == 200 + assert tas_vic_row["additional_network_capacity_mw"].iloc[0] == 150 + # Check cost per year column is correct (cost divided by nominal limit) + assert abs(nnsw_sq_row["2024_25_$/mw"].iloc[0] - (10 / 200)) < 1e-6 + assert abs(nnsw_sq_row["2025_26_$/mw"].iloc[0] - (20 / 
200)) < 1e-6 + assert abs(tas_vic_row["2024_25_$/mw"].iloc[0] - (15 / 150)) < 1e-6 + assert abs(tas_vic_row["2025_26_$/mw"].iloc[0] - (25 / 150)) < 1e-6 + + +def test_template_sub_regional_flow_path_costs_use_first_year_with_valid_costs(): + """ + Test that the first year with non-nan cost data for all options is used. + """ + # NNSW-SQ: only 2025_26 has all non-nan costs + aug_table_nnsw_sq = pd.DataFrame( + { + "Flow path": ["NNSW-SQ", "NNSW-SQ", "NNSW-SQ"], + "Option Name": ["NNSW-SQ Option 1", "NNSW-SQ Option 2", "NNSW-SQ Option 3"], + "forward_capacity_increase": [150, 200, 200], + "reverse_capacity_increase": [200, 150, 150], + } + ) + # Even though option 3 is cheaper than option 2 in 2024_25, option 2 should get + # chosen because 2025_26 is used as the comparison year. + cost_table_nnsw_sq = pd.DataFrame( + { + "Flow path": ["NNSW-SQ", "NNSW-SQ", "NNSW-SQ"], + "Option Name": ["NNSW-SQ Option 1", "NNSW-SQ Option 2", "NNSW-SQ Option 3"], + "2024_25": [np.nan, 50, 10], + "2025_26": [35, 45, 50], + } + ) + # TAS-VIC: all years have valid costs + aug_table_tas_vic = pd.DataFrame( + { + "Flow path": ["TAS-VIC", "TAS-VIC"], + "Option Name": ["TAS-VIC Option 1", "TAS-VIC Option 2"], + "forward_capacity_increase": [90, 100], + "reverse_capacity_increase": [100, 90], + } + ) + cost_table_tas_vic = pd.DataFrame( + { + "Flow path": ["TAS-VIC", "TAS-VIC"], + "Option Name": ["TAS-VIC Option 1", "TAS-VIC Option 2"], + "2024_25": [100, 10], + "2025_26": [10, 100], + } + ) + iasr_tables = { + "flow_path_augmentation_options_NNSW-SQ": aug_table_nnsw_sq, + "flow_path_augmentation_options_TAS-VIC": aug_table_tas_vic, + "flow_path_augmentation_costs_progressive_change_NNSW-SQ": cost_table_nnsw_sq, + "flow_path_augmentation_costs_progressive_change_TAS-VIC": cost_table_tas_vic, + } + scenario = "Progressive Change" + result = _template_sub_regional_flow_path_costs(iasr_tables, scenario) + # NNSW-SQ: Only 2025_26 has all non-nan costs, so selection is based on that year for all 
years + nnsw_sq_row = result[result["flow_path"] == "NNSW-SQ"] + assert nnsw_sq_row["option"].iloc[0] == "NNSW-SQ Option 1" + assert nnsw_sq_row["additional_network_capacity_mw"].iloc[0] == 200 + assert np.isnan(nnsw_sq_row["2024_25_$/mw"].iloc[0]) + assert abs(nnsw_sq_row["2025_26_$/mw"].iloc[0] - (35 / 200)) < 1e-6 + # TAS-VIC: both years valid, Option 2 is the least cost only in first, + # but should be chosen on this basis. + tas_vic_row = result[result["flow_path"] == "TAS-VIC"] + assert tas_vic_row["option"].iloc[0] == "TAS-VIC Option 2" + assert tas_vic_row["additional_network_capacity_mw"].iloc[0] == 100 + assert abs(tas_vic_row["2024_25_$/mw"].iloc[0] - (10 / 100)) < 1e-6 + assert abs(tas_vic_row["2025_26_$/mw"].iloc[0] - (100 / 100)) < 1e-6 + + +def test_get_least_cost_options_logs_unmatched(caplog): + """ + Test that _get_least_cost_options logs dropped flow_path/option_name pairs from both tables. + """ + # Augmentation table has one extra option not in cost table + aug_table = pd.DataFrame( + { + "id": ["A", "A", "B"], + "option": ["opt1", "opt2", "opt3"], + "nominal_capacity_increase": [100, 200, 300], + } + ) + # Cost table has one extra option not in aug table + cost_table = pd.DataFrame( + { + "id": ["A", "A", "B"], + "option": ["opt1", "opt2", "opt4"], + "2024_25": [10, 20, 30], + "2025_26": [15, 25, 35], + } + ) + # Only the (B, opt3) and (B, opt4) pairs should be dropped + with caplog.at_level("INFO"): + result = _get_least_cost_options(aug_table, cost_table, _FLOW_PATH_CONFIG) + # Check logs for both dropped pairs + assert "Dropped options from augmentation table: [('B', 'opt3')]" in caplog.text + assert "Dropped options from cost table: [('B', 'opt4')]" in caplog.text + + +def test_get_full_flow_path_aug_table_logs_missing_tables(caplog): + """ + Test that _get_augmentation_table logs a warning when augmentation tables are missing. 
+ """ + # Only provide one of the required augmentation tables + present_table = _FLOW_PATH_CONFIG["table_names"]["augmentation"][0] + iasr_tables = { + present_table: pd.DataFrame( + { + "Flow path": ["A"], + "Option Name": ["opt1"], + "forward_capacity_increase": [100], + "reverse_capacity_increase": [90], + } + ) + } + missing = [ + t + for t in _FLOW_PATH_CONFIG["table_names"]["augmentation"] + if t != present_table + ] + with caplog.at_level("WARNING"): + _get_augmentation_table(iasr_tables, _FLOW_PATH_CONFIG) + # Check that the warning about missing tables is logged + assert f"Missing augmentation tables: {missing}" in caplog.text + + +def test_get_cleaned_flow_path_cost_tables_logs_missing_tables(caplog): + """ + Test that _get_cost_table logs a warning when cost tables are missing. + """ + # Only provide one of the required cost tables + cost_scenario = "progressive_change" + cost_table_names = _FLOW_PATH_CONFIG["table_names"]["cost"][cost_scenario] + present_table = cost_table_names[0] + iasr_tables = { + present_table: pd.DataFrame( + { + "id": ["A"], + "option": ["opt1"], + "2024_25": [10], + } + ) + } + missing = [t for t in cost_table_names if t != present_table] + with caplog.at_level("WARNING"): + _get_cost_table(iasr_tables, cost_scenario, _FLOW_PATH_CONFIG) + # Check that the warning about missing tables is logged + assert f"Missing cost tables: {missing}" in caplog.text diff --git a/tests/test_templater/test_flow_paths.py b/tests/test_templater/test_flow_paths.py index ad51e7f..fbe3b29 100644 --- a/tests/test_templater/test_flow_paths.py +++ b/tests/test_templater/test_flow_paths.py @@ -5,6 +5,7 @@ from ispypsa.templater import load_manually_extracted_tables from ispypsa.templater.flow_paths import ( _template_regional_interconnectors, + _template_sub_regional_flow_path_costs, _template_sub_regional_flow_paths, ) @@ -38,7 +39,7 @@ def test_flow_paths_templater_regional(workbook_table_cache_test_path: Path): [ True for name in ("QNI", "Terranora", 
"Heywood", "Murraylink", "Basslink") - if name in flow_paths_template.flow_path_name + if name in flow_paths_template.flow_path ] ) assert len(flow_paths_template) == 6 @@ -50,9 +51,8 @@ def test_flow_paths_templater_sub_regional(workbook_table_cache_test_path: Path) "flow_path_transfer_capability.csv" ) flow_path_transfer_capability = pd.read_csv(filepath) - manual_tables = load_manually_extracted_tables("6.0") flow_paths_template = _template_sub_regional_flow_paths( - flow_path_transfer_capability, manual_tables["transmission_expansion_costs"] + flow_path_transfer_capability ) assert all( [ @@ -75,8 +75,8 @@ def test_flow_paths_templater_sub_regional(workbook_table_cache_test_path: Path) [ True for name in ("QNI", "Terranora", "Heywood", "Murraylink", "Basslink") - if name in flow_paths_template.flow_path_name + if name in flow_paths_template.flow_path ] ) assert len(flow_paths_template) == 14 - assert len(flow_paths_template.columns) == 6 + assert len(flow_paths_template.columns) == 5 diff --git a/tests/test_templater/test_nodes.py b/tests/test_templater/test_nodes.py index 6a02c01..e1693fc 100644 --- a/tests/test_templater/test_nodes.py +++ b/tests/test_templater/test_nodes.py @@ -16,9 +16,9 @@ def test_node_templater_nem_regions(workbook_table_cache_test_path: Path): ("Prominent Hill", "Barcaldine") ) assert set(regional_template.regional_reference_node_voltage_kv) == set((132,)) - assert not regional_template.substation_longitude.empty - assert not regional_template.substation_latitude.empty - assert len(regional_template.columns) == 6 + # assert not regional_template.substation_longitude.empty + # assert not regional_template.substation_latitude.empty + assert len(regional_template.columns) == 4 def test_templater_sub_regions(workbook_table_cache_test_path: Path): @@ -31,9 +31,9 @@ def test_templater_sub_regions(workbook_table_cache_test_path: Path): ("Prominent Hill", "Barcaldine") ) assert set(sub_regions_template.sub_region_reference_node_voltage_kv) == 
set((132,)) - assert not sub_regions_template.substation_longitude.empty - assert not sub_regions_template.substation_latitude.empty - assert len(sub_regions_template.columns) == 6 + # assert not sub_regions_template.substation_longitude.empty + # assert not sub_regions_template.substation_latitude.empty + assert len(sub_regions_template.columns) == 4 def test_templater_sub_regions_mapping_only(workbook_table_cache_test_path: Path): @@ -65,17 +65,17 @@ def test_no_substation_coordinates(workbook_table_cache_test_path: Path, mocker) assert len(sub_regions_template.columns) == 4 -def test_substation_coordinate_http_error( - workbook_table_cache_test_path: Path, requests_mock, caplog -): - url = "https://services.ga.gov.au/gis/services/Foundation_Electricity_Infrastructure/MapServer/WFSServer" - requests_mock.get(url, status_code=404) - # Run the test and expect an HTTPError - with caplog.at_level(logging.WARNING): - filepath = workbook_table_cache_test_path / Path( - "sub_regional_reference_nodes.csv" - ) - sub_regional_reference_nodes = pd.read_csv(filepath) - sub_regions_template = _template_sub_regions(sub_regional_reference_nodes) - assert "Failed to fetch substation coordinates" in caplog.text - assert "Network node data will be templated without coordinate data" in caplog.text +# def test_substation_coordinate_http_error( +# workbook_table_cache_test_path: Path, requests_mock, caplog +# ): +# url = "https://services.ga.gov.au/gis/services/Foundation_Electricity_Infrastructure/MapServer/WFSServer" +# requests_mock.get(url, status_code=404) +# # Run the test and expect an HTTPError +# with caplog.at_level(logging.WARNING): +# filepath = workbook_table_cache_test_path / Path( +# "sub_regional_reference_nodes.csv" +# ) +# sub_regional_reference_nodes = pd.read_csv(filepath) +# sub_regions_template = _template_sub_regions(sub_regional_reference_nodes) +# assert "Failed to fetch substation coordinates" in caplog.text +# assert "Network node data will be templated 
without coordinate data" in caplog.text diff --git a/tests/test_templater/test_rez_transmission_costs.py b/tests/test_templater/test_rez_transmission_costs.py new file mode 100644 index 0000000..c2b9882 --- /dev/null +++ b/tests/test_templater/test_rez_transmission_costs.py @@ -0,0 +1,240 @@ +import numpy as np +import pandas as pd +import pytest + +from ispypsa.templater.flow_paths import ( + _get_augmentation_table, + _get_cost_table, + _get_least_cost_options, + _template_rez_transmission_costs, + process_transmission_costs, +) +from ispypsa.templater.mappings import ( + _REZ_CONFIG, + _REZ_PREPATORY_ACTIVITIES_NAME_TO_REZ_AND_OPTION_NAME, +) + + +def test_template_rez_transmission_costs_simple_least_cost_option(): + # Augmentation tables for SWQLD1 and SWV1 REZs + aug_table_swqld = pd.DataFrame( + { + "REZ constraint ID": ["SWQLD1", "SWQLD1", "SWQLD1"], + "Option": ["Option 1", "Option 2", "Option 3"], + "Additional network capacity (MW)": [100, 200, 40], + } + ) + aug_table_swv = pd.DataFrame( + { + "REZ constraint ID": ["SWV1", "SWV1", "SWV1"], + "Option": [ + "Option 1A", + "Option 1B", + "Option 2A", + ], + "Additional network capacity (MW)": [150, 70, 120], + } + ) + # Cost tables for SWQLD1 and SWV1 REZs + # Option 2 is least cost and has the largest increase so should be chosen. + cost_table_swqld = pd.DataFrame( + { + "REZ constraint ID": ["SWQLD1", "SWQLD1", "SWQLD1"], + "Option": ["Option 1", "Option 2", "Option 3"], + "2024_25": [50, 40, 60], + "2025_26": [55, 45, 65], + } + ) + # Option 1A is least cost and has the largest increase so should be chosen. 
+ cost_table_swv = pd.DataFrame( + { + "REZ constraint ID": ["SWV1", "SWV1", "SWV1"], + "Option": ["Option 1A", "Option 1B", "Option 2A"], + "2024_25": [70, 80, 100], + "2025_26": [75, 85, 110], + } + ) + # Preparatory activities table (should not be chosen due to higher costs) + # Using entries that exist in _REZ_PREPATORY_ACTIVITIES_NAME_TO_REZ_AND_OPTION_NAME + prep_acts = pd.DataFrame( + { + "REZ": [ + "Darling Downs REZ Expansion(Stage 1)", + "South West Victoria REZ Option 1A", + ], + "2024_25": [100, 110], + "2025_26": [110, 120], + } + ) + + # Compose iasr_tables dict with correct table names + iasr_tables = { + "rez_augmentation_options_QLD": aug_table_swqld, + "rez_augmentation_options_VIC": aug_table_swv, + "rez_augmentation_costs_progressive_change_QLD": cost_table_swqld, + "rez_augmentation_costs_progressive_change_VIC": cost_table_swv, + "rez_augmentation_costs_progressive_change_preparatory_activities": prep_acts, + } + scenario = "Progressive Change" + # Run function + result = _template_rez_transmission_costs(iasr_tables, scenario, ["SWV1", "SWQLD1"]) + # Check least cost options are chosen for SWQLD1 and SWV1 + swqld_row = result[result["rez_constraint_id"] == "SWQLD1"] + swv_row = result[result["rez_constraint_id"] == "SWV1"] + assert swqld_row["option"].iloc[0] == "Option 2" + assert swv_row["option"].iloc[0] == "Option 1A" + # Check additional_network_capacity_mw is correct + assert swqld_row["additional_network_capacity_mw"].iloc[0] == 200 + assert swv_row["additional_network_capacity_mw"].iloc[0] == 150 + # Check cost per year column is correct (cost divided by capacity) + # For SWQLD1 Option 2: 2024_25 = 40/200 = 0.2, 2025_26 = 45/200 = 0.225 + # For SWV1 Option 1A: 2024_25 = 70/150 ≈ 0.4667, 2025_26 = 75/150 = 0.5 + assert abs(swqld_row["2024_25_$/mw"].iloc[0] - 0.2) < 1e-6 + assert abs(swqld_row["2025_26_$/mw"].iloc[0] - 0.225) < 1e-6 + assert abs(swv_row["2024_25_$/mw"].iloc[0] - (70 / 150)) < 1e-6 + assert 
abs(swv_row["2025_26_$/mw"].iloc[0] - 0.5) < 1e-6 + + +def test_template_rez_transmission_costs_prep_activities_chosen(): + """ + The cost of the non preparatory activities have been made very high + and therefore preparatory activities should be chosen. + """ + # Augmentation tables for SWQLD1 and SWV1 REZs + aug_table_swqld = pd.DataFrame( + { + "REZ constraint ID": ["SWQLD1", "SWQLD1", "SWQLD1"], + "Option": ["Option 1", "Option 2", "Option 3"], + "Additional network capacity (MW)": [100, 150, 200], + } + ) + aug_table_swv = pd.DataFrame( + { + "REZ constraint ID": ["SWV1", "SWV1", "SWV1"], + "Option": ["Option 1A", "Option 1B", "Option 2A"], + "Additional network capacity (MW)": [140, 150, 160], + } + ) + # Standard cost tables - options that have costs in prep activities should have NaN here + cost_table_swqld = pd.DataFrame( + { + "REZ constraint ID": ["SWQLD1", "SWQLD1", "SWQLD1"], + "Option": ["Option 1", "Option 2", "Option 3"], + "2024_25": [ + np.nan, + 1000, + 1000, + ], # Option 1 has NaN since it's in prep activities + "2025_26": [np.nan, 1000, 1000], + } + ) + cost_table_swv = pd.DataFrame( + { + "REZ constraint ID": ["SWV1", "SWV1", "SWV1"], + "Option": ["Option 1A", "Option 1B", "Option 2A"], + "2024_25": [ + 1000, + 1000, + np.nan, + ], # Option 2A has NaN since it's in prep activities + "2025_26": [1000, 1000, np.nan], + } + ) + # Preparatory activities table (set low cost) + # Using entries that exist in _REZ_PREPATORY_ACTIVITIES_NAME_TO_REZ_AND_OPTION_NAME + prep_acts = pd.DataFrame( + { + "REZ": [ + "Darling Downs REZ Expansion(Stage 1)", + "South West Victoria REZ Option 2A", + ], + "2024_25": [10, 15], + "2025_26": [20, 25], + } + ) + + # Compose iasr_tables dict + iasr_tables = { + "rez_augmentation_options_QLD": aug_table_swqld, + "rez_augmentation_options_VIC": aug_table_swv, + "rez_augmentation_costs_progressive_change_QLD": cost_table_swqld, + "rez_augmentation_costs_progressive_change_VIC": cost_table_swv, + 
"rez_augmentation_costs_progressive_change_preparatory_activities": prep_acts, + } + scenario = "Progressive Change" + # Run function + result = _template_rez_transmission_costs(iasr_tables, scenario, ["SWV1", "SWQLD1"]) + # Check that the prep activity is chosen for SWQLD1 and SWV1 + swqld_row = result[result["rez_constraint_id"] == "SWQLD1"] + swv_row = result[result["rez_constraint_id"] == "SWV1"] + assert swqld_row["option"].iloc[0] == "Option 1" + assert swv_row["option"].iloc[0] == "Option 2A" + # Check additional_network_capacity_mw is correct + assert swqld_row["additional_network_capacity_mw"].iloc[0] == 100 + assert swv_row["additional_network_capacity_mw"].iloc[0] == 160 + # Check cost per year column is correct (cost divided by capacity) + assert abs(swqld_row["2024_25_$/mw"].iloc[0] - (10 / 100)) < 1e-6 + assert abs(swqld_row["2025_26_$/mw"].iloc[0] - (20 / 100)) < 1e-6 + assert abs(swv_row["2024_25_$/mw"].iloc[0] - (15 / 160)) < 1e-6 + assert abs(swv_row["2025_26_$/mw"].iloc[0] - (25 / 160)) < 1e-6 + + +def test_template_rez_transmission_costs_use_first_year_with_valid_costs(): + """ + Test that the first year with non-nan cost data for all options is used. + """ + # SWQLD1: only 2025_26 has all non-nan costs + aug_table_swqld = pd.DataFrame( + { + "REZ constraint ID": ["SWQLD1", "SWQLD1", "SWQLD1"], + "Option": ["Option 1", "Option 2", "Option 3"], + "Additional network capacity (MW)": [150, 150, 150], + } + ) + # Even though option 3 is cheaper than option 2 in 2024_25, option 1 should get + # chosen because 2025_26 is used as the comparison year and it has the lowest cost there. 
+ cost_table_swqld = pd.DataFrame( + { + "REZ constraint ID": ["SWQLD1", "SWQLD1", "SWQLD1"], + "Option": ["Option 1", "Option 2", "Option 3"], + "2024_25": [np.nan, 50, 10], + "2025_26": [35, 45, 50], + } + ) + # SWV1: all years have valid costs + aug_table_swv = pd.DataFrame( + { + "REZ constraint ID": ["SWV1", "SWV1"], + "Option": ["Option 1A", "Option 1B"], + "Additional network capacity (MW)": [90, 100], + } + ) + cost_table_swv = pd.DataFrame( + { + "REZ constraint ID": ["SWV1", "SWV1"], + "Option": ["Option 1A", "Option 1B"], + "2024_25": [100, 10], + "2025_26": [10, 100], + } + ) + iasr_tables = { + "rez_augmentation_options_QLD": aug_table_swqld, + "rez_augmentation_options_VIC": aug_table_swv, + "rez_augmentation_costs_progressive_change_QLD": cost_table_swqld, + "rez_augmentation_costs_progressive_change_VIC": cost_table_swv, + } + scenario = "Progressive Change" + result = _template_rez_transmission_costs(iasr_tables, scenario, ["SWV1", "SWQLD1"]) + # SWQLD1: Only 2025_26 has all non-nan costs, so selection is based on that year for all years + swqld_row = result[result["rez_constraint_id"] == "SWQLD1"] + assert swqld_row["option"].iloc[0] == "Option 1" + assert swqld_row["additional_network_capacity_mw"].iloc[0] == 150 + assert np.isnan(swqld_row["2024_25_$/mw"].iloc[0]) + assert abs(swqld_row["2025_26_$/mw"].iloc[0] - (35 / 150)) < 1e-6 + # SWV1: both years valid, Option 1B is the least cost only in first, + # but should be chosen on this basis. 
def test_translate_existing_flow_path_capacity_to_lines(csv_str_to_df):
    """Test that existing flow paths are correctly translated to lines."""
    # Input: two existing AC flow paths with their summer-typical ratings.
    existing_flow_paths_csv = """
    flow_path, carrier, node_from, node_to, forward_direction_mw_summer_typical
    PathA-PathB, AC, NodeA, NodeB, 1000
    PathB-PathC, AC, NodeB, NodeC, 2000
    """
    # Expected: one "_existing" line per path, fixed capacity, no capital cost.
    expected_lines_csv = """
    name, carrier, bus0, bus1, s_nom, capital_cost, s_nom_extendable
    PathA-PathB_existing, AC, NodeA, NodeB, 1000, , False
    PathB-PathC_existing, AC, NodeB, NodeC, 2000, , False
    """
    expected = csv_str_to_df(expected_lines_csv)
    # The blank capital_cost entries must compare as NaN floats.
    expected["capital_cost"] = pd.to_numeric(expected["capital_cost"], errors="coerce")

    translated = _translate_existing_flow_path_capacity_to_lines(
        csv_str_to_df(existing_flow_paths_csv)
    )

    pd.testing.assert_frame_equal(
        translated.sort_index(axis=1), expected.sort_index(axis=1)
    )


def test_translate_expansion_costs_to_lines(csv_str_to_df):
    """Test that flow path expansion costs are correctly translated to lines."""
    flow_path_expansion_costs_csv = """
    flow_path, additional_network_capacity_mw, 2025_26_$/mw, 2026_27_$/mw
    NodeA-NodeB, 500, , 1200
    NodeB-NodeC, 800, 1500, 1800
    """
    existing_lines_csv = """
    name, carrier, bus0, bus1, s_nom
    NodeA-NodeB_existing, AC, NodeA, NodeB, 1000
    NodeB-NodeC_existing, AC, NodeB, NodeC, 2000
    """
    result = _translate_expansion_costs_to_lines(
        csv_str_to_df(flow_path_expansion_costs_csv),
        csv_str_to_df(existing_lines_csv),
        [2026, 2027],
        "fy",
        0.07,
        30,
        id_column="flow_path",
        match_column="name",
    )

    # NodeA-NodeB has no 2025_26 cost, so only its 2027 expansion appears.
    # Capital costs depend on the annuitisation formula, so they are checked
    # separately rather than pinned to exact values.
    expected_result_csv = """
    name, bus0, bus1, carrier, s_nom, s_nom_extendable, build_year, lifetime
    NodeB-NodeC_exp_2026, NodeB, NodeC, AC, 0.0, True, 2026, 30
    NodeA-NodeB_exp_2027, NodeA, NodeB, AC, 0.0, True, 2027, 30
    NodeB-NodeC_exp_2027, NodeB, NodeC, AC, 0.0, True, 2027, 30
    """
    expected = csv_str_to_df(expected_result_csv)

    result = result.sort_values(["name"]).reset_index(drop=True)
    expected = expected.sort_values(["name"]).reset_index(drop=True)

    # Annuitised capital costs should simply be positive.
    assert all(result["capital_cost"] > 0)
    result = result.drop(columns="capital_cost")

    # Every expected column must be present in the translated output.
    assert set(expected.columns).issubset(set(result.columns))

    # Compare column by column, tolerating dtype and index-name differences.
    for col in expected.columns:
        pd.testing.assert_series_equal(
            result[col],
            expected[col],
            check_dtype=False,
            check_names=False,
        )
check_names=False, # Ignore index names + ) + + +def test_translate_expansion_costs_to_lines_empty(csv_str_to_df): + """Test that empty flow path expansion costs result in empty DataFrame.""" + # Create empty DataFrame + flow_path_expansion_costs_csv = """ + flow_path,additional_network_capacity_mw,2025_26_$/mw + """ + flow_path_expansion_costs = csv_str_to_df(flow_path_expansion_costs_csv) + + existing_lines_csv = """ + name, carrier, bus0, bus1, s_nom + PathA-PathB_existing, AC, NodeA, NodeB, 1000 + """ + existing_lines_df = csv_str_to_df(existing_lines_csv) + + result = _translate_expansion_costs_to_lines( + flow_path_expansion_costs, + existing_lines_df, + [2026], + "fy", + 0.07, + 30, + id_column="flow_path", + match_column="name", + ) + + # The result should be an empty DataFrame + assert result.empty + + +def test_translate_expansion_costs_to_lines_no_matching_years(csv_str_to_df): + """Test when none of the expansion costs match the investment periods.""" + # Create sample data for testing + flow_path_expansion_costs_csv = """ + flow_path, additional_network_capacity_mw, 2025_26_$/mw + PathA-PathB, 500, 1000 + """ + flow_path_expansion_costs = csv_str_to_df(flow_path_expansion_costs_csv) + + existing_lines_csv = """ + name, carrier, bus0, bus1, s_nom + PathA-PathB_existing, AC, NodeA, NodeB, 1000 + """ + existing_lines_df = csv_str_to_df(existing_lines_csv) + + # Investment periods don't include 2026 + investment_periods = [2027, 2028] + year_type = "fy" + wacc = 0.07 + asset_lifetime = 30 + + # Call the function with updated parameters + result = _translate_expansion_costs_to_lines( + flow_path_expansion_costs, + existing_lines_df, + investment_periods, + year_type, + wacc, + asset_lifetime, + id_column="flow_path", + match_column="name", + ) + + # The result should be an empty DataFrame since no years match + assert result.empty + + +def test_translate_flow_paths_to_lines_with_expansion(csv_str_to_df): + """Test that flow paths are translated to lines 
with expansion.""" + # Create sample input data + flow_paths_csv = """ + flow_path, carrier, node_from, node_to, forward_direction_mw_summer_typical + PathA-PathB, AC, NodeA, NodeB, 1000 + PathB-PathC, AC, NodeB, NodeC, 2000 + """ + + flow_path_expansion_costs_csv = """ + flow_path, additional_network_capacity_mw, 2025_26_$/mw, 2026_27_$/mw + PathA-PathB, 500, 1000, 1200 + PathB-PathC, 800, 1500, 1800 + """ + + ispypsa_tables = { + "flow_paths": csv_str_to_df(flow_paths_csv), + "flow_path_expansion_costs": csv_str_to_df(flow_path_expansion_costs_csv), + } + + # Mock config with expansion enabled + class MockTemporalConfig: + class MockCapacityExpansion: + investment_periods = [2026, 2027] + + year_type = "fy" + capacity_expansion = MockCapacityExpansion() + + class MockNetworkConfig: + annuitisation_lifetime = 30 + transmission_expansion = True # This is the key parameter needed + + class MockConfig: + temporal = MockTemporalConfig() + network = MockNetworkConfig() + wacc = 0.07 + + config = MockConfig() + + # Call the function + result = _translate_flow_paths_to_lines(ispypsa_tables, config) + + # Check the result is of the expected length + assert len(result) == 6 + + # Check that the result includes both existing and expansion lines + assert any("_existing" in name for name in result["name"]) + assert any("_exp_" in name for name in result["name"]) + + +def test_translate_flow_paths_to_lines_without_expansion(csv_str_to_df): + """Test that flow paths are translated to lines without expansion.""" + # Create sample input data + flow_paths_csv = """ + flow_path, carrier, node_from, node_to, forward_direction_mw_summer_typical + PathA-PathB, AC, NodeA, NodeB, 1000 + PathB-PathC, AC, NodeB, NodeC, 2000 + """ + + flow_path_expansion_costs_csv = """ + flow_path, additional_network_capacity_mw, 2025_26_$/mw, 2026_27_$/mw + PathA-PathB, 500, 1000, 1200 + PathB-PathC, 800, 1500, 1800 + """ + + ispypsa_tables = { + "flow_paths": csv_str_to_df(flow_paths_csv), + 
"flow_path_expansion_costs": csv_str_to_df(flow_path_expansion_costs_csv), + } + + # Mock config with expansion disabled + class MockTemporalConfig: + class MockCapacityExpansion: + investment_periods = [2026, 2027] + + year_type = "fy" + capacity_expansion = MockCapacityExpansion() + + class MockNetworkConfig: + annuitisation_lifetime = 30 + transmission_expansion = False # This is the key parameter needed + + class MockConfig: + temporal = MockTemporalConfig() + network = MockNetworkConfig() + wacc = 0.07 + + config = MockConfig() + + # Call the function + result = _translate_flow_paths_to_lines(ispypsa_tables, config) + + # Expected result - only existing lines, no expansion lines + expected_result_csv = """ + name, bus0, bus1, s_nom, capital_cost, s_nom_extendable, carrier + PathA-PathB_existing, NodeA, NodeB, 1000, , False, AC + PathB-PathC_existing, NodeB, NodeC, 2000, , False, AC + """ + expected_result = csv_str_to_df(expected_result_csv) + expected_result["capital_cost"] = pd.to_numeric( + expected_result["capital_cost"], errors="coerce" + ) + + # Sort both dataframes for comparison + result = result.sort_values("name").reset_index(drop=True) + expected_result = expected_result.sort_values("name").reset_index(drop=True) + + # Assert the results match expectations + for col in expected_result.columns: + pd.testing.assert_series_equal( + result[col], + expected_result[col], + check_dtype=False, + check_names=False, + ) + + +def test_translate_expansion_costs_to_lines_calendar_year_error(csv_str_to_df): + """Test that calendar year type raises a NotImplementedError.""" + # Create sample data + flow_path_expansion_costs_csv = """ + flow_path, additional_network_capacity_mw, 2025_26_$/mw + PathA-PathB, 500, 1000 + """ + flow_path_expansion_costs = csv_str_to_df(flow_path_expansion_costs_csv) + + existing_lines_csv = """ + name, carrier, bus0, bus1, s_nom + PathA-PathB_existing, AC, NodeA, NodeB, 1000 + """ + existing_lines_df = csv_str_to_df(existing_lines_csv) 
+ + investment_periods = [2026] + year_type = "calendar" # This should trigger the error + wacc = 0.07 + asset_lifetime = 30 + + # Check that the correct error is raised + with pytest.raises( + NotImplementedError, + match="Calendar years not implemented for transmission costs", + ): + _translate_expansion_costs_to_lines( + flow_path_expansion_costs, + existing_lines_df, + investment_periods, + year_type, + wacc, + asset_lifetime, + id_column="flow_path", + match_column="name", + ) + + +def test_translate_expansion_costs_to_lines_invalid_year_type(csv_str_to_df): + """Test that an invalid year type raises a ValueError.""" + # Create sample data + flow_path_expansion_costs_csv = """ + flow_path, additional_network_capacity_mw, 2025_26_$/mw + PathA-PathB, 500, 1000 + """ + flow_path_expansion_costs = csv_str_to_df(flow_path_expansion_costs_csv) + + existing_lines_csv = """ + name, carrier, bus0, bus1, s_nom + PathA-PathB_existing, AC, NodeA, NodeB, 1000 + """ + existing_lines_df = csv_str_to_df(existing_lines_csv) + + investment_periods = [2026] + year_type = "invalid_year_type" # This should trigger the error + wacc = 0.07 + asset_lifetime = 30 + + # Check that the correct error is raised + with pytest.raises(ValueError, match="Unknown year_type"): + _translate_expansion_costs_to_lines( + flow_path_expansion_costs, + existing_lines_df, + investment_periods, + year_type, + wacc, + asset_lifetime, + id_column="flow_path", + match_column="name", + ) diff --git a/tests/test_translator/test_translate_custom_constraints.py b/tests/test_translator/test_translate_custom_constraints.py index 170b314..5d36306 100644 --- a/tests/test_translator/test_translate_custom_constraints.py +++ b/tests/test_translator/test_translate_custom_constraints.py @@ -2,6 +2,7 @@ import pandas as pd from ispypsa.translator.custom_constraints import ( + _translate_custom_constraint_generators_to_lhs, _translate_custom_constraint_lhs, _translate_custom_constraint_rhs, 
_translate_custom_constraints_generators, @@ -9,31 +10,41 @@ def test_translate_custom_constraints_generators(): - ispypsa_custom_constraint_gens = pd.DataFrame( + constraint_expansion_costs = pd.DataFrame( { - "variable_name": ["X", "Y"], - "constraint_id": ["A", "B"], - "indicative_transmission_expansion_cost_$/mw": [0.0, np.nan], + "rez_constraint_id": ["A", "B"], + "2025_26_$/mw": [9.0, np.nan], + "2026_27_$/mw": [10.0, 15.0], } ) expected_pypsa_custom_constraint_gens = pd.DataFrame( { - "name": ["X", "Y"], - "constraint_name": ["A", "B"], - "capital_cost": [0.0, np.nan], + "name": ["A_exp_2026", "A_exp_2027", "B_exp_2027"], + "constraint_name": ["A", "A", "B"], "bus": "bus_for_custom_constraint_gens", - "p_nom": [0.0, 0.0], - "p_nom_extendable": [True, False], + "p_nom": [0.0, 0.0, 0.0], + "p_nom_extendable": [True, True, True], + "build_year": [2026, 2027, 2027], + "lifetime": 10, } ) pypsa_custom_constraint_gens = _translate_custom_constraints_generators( - [ispypsa_custom_constraint_gens], - expansion_on=True, + ["A", "B"], + constraint_expansion_costs, wacc=5.0, asset_lifetime=10, + investment_periods=[2026, 2027], + year_type="fy", ) + + assert all(pypsa_custom_constraint_gens["capital_cost"] > 0) + pypsa_custom_constraint_gens = pypsa_custom_constraint_gens.drop( + columns="capital_cost" + ) + pd.testing.assert_frame_equal( - expected_pypsa_custom_constraint_gens, pypsa_custom_constraint_gens + expected_pypsa_custom_constraint_gens, + pypsa_custom_constraint_gens, ) @@ -88,3 +99,21 @@ def test_translate_custom_constraints_lhs(): pd.testing.assert_frame_equal( expected_pypsa_custom_constraint_lhs, pypsa_custom_constraint_lhs ) + + +def test_translate_custom_constraint_generators_to_lhs(csv_str_to_df): + custom_constraint_generators = """ + constraint_name, name + XY, B + """ + custom_constraint_generators = csv_str_to_df(custom_constraint_generators) + expected_lhs_definition = """ + constraint_id, term_type, term_id, coefficient + XY, generator_capacity, 
B, -1.0 + """ + expected_lhs_definition = csv_str_to_df(expected_lhs_definition) + + lhs_definition = _translate_custom_constraint_generators_to_lhs( + custom_constraint_generators + ) + pd.testing.assert_frame_equal(expected_lhs_definition, lhs_definition) diff --git a/tests/test_translator_and_model/test_time_varying_flow_path_costs.py b/tests/test_translator_and_model/test_time_varying_flow_path_costs.py new file mode 100644 index 0000000..ad7aeb9 --- /dev/null +++ b/tests/test_translator_and_model/test_time_varying_flow_path_costs.py @@ -0,0 +1,227 @@ +from pathlib import Path + +import pandas as pd +import pypsa +import pytest +from isp_trace_parser.demand_traces import write_new_demand_filepath + +from ispypsa.config import ModelConfig +from ispypsa.model.build import build_pypsa_network +from ispypsa.translator.create_pypsa_friendly_inputs import ( + create_pypsa_friendly_inputs, + create_pypsa_friendly_snapshots, + create_pypsa_friendly_timeseries_inputs, +) + + +def test_line_expansion_economic_timing(csv_str_to_df, tmp_path, monkeypatch): + """Test that line expansion occurs when it becomes economically viable. + + This test creates a simple two-region network (A and B) where: + - Region A has an expensive generator and fixed demand of 100 MW + - Region B has a cheap generator and no demand + - The existing transmission line can only carry 50 MW (half the demand) + - Line expansion costs change between years, making expansion economic in year 2 + + The test uses the translator to convert ISPyPSA format tables to PyPSA format. 
+ """ + # Create directories + ispypsa_dir = tmp_path / "ispypsa_inputs" + ispypsa_dir.mkdir() + pypsa_dir = tmp_path / "pypsa_inputs" + pypsa_dir.mkdir() + traces_dir = tmp_path / "traces" + traces_dir.mkdir() + + # Create subdirectories for traces + for subdir in ["demand", "wind", "solar"]: + (traces_dir / subdir).mkdir() + + # Mock environment variable for trace parser + monkeypatch.setenv("PATH_TO_PARSED_TRACES", str(traces_dir)) + + # Create a mock config + config_dict = { + "ispypsa_run_name": "test", + "scenario": "Step Change", + "wacc": 0.07, + "discount_rate": 0.05, + "network": { + "transmission_expansion": True, + "rez_transmission_expansion": False, + "annuitisation_lifetime": 30, + "nodes": { + "regional_granularity": "sub_regions", + "rezs": "attached_to_parent_node", + }, + "rez_to_sub_region_transmission_default_limit": 1e5, + }, + "temporal": { + "path_to_parsed_traces": "ENV", + "year_type": "fy", + "range": { + "start_year": 2025, + "end_year": 2026, + }, + "capacity_expansion": { + "resolution_min": 30, + "reference_year_cycle": [2018], + "investment_periods": [2025, 2026], + "aggregation": { + "representative_weeks": None, # Use full year + }, + }, + }, + "unserved_energy": { + "cost": 10000.0, + "generator_size_mw": 1000.0, + }, + "solver": "highs", + "iasr_workbook_version": "6.0", + } + + demand_data_to_write = [ + ("2024-08-01 00:00:00", 0.0, "A", "2024-2"), + ("2025-05-01 00:00:00", 250.0, "A", "2025-1"), + ("2025-08-01 00:00:00", 0.0, "A", "2025-2"), + ("2026-05-01 00:00:00", 250.0, "A", "2026-1"), + ("2024-08-01 00:00:00", 0.0, "B", "2024-2"), + ("2025-05-01 00:00:00", 0.0, "B", "2025-1"), + ("2025-08-01 00:00:00", 0.0, "B", "2025-2"), + ("2026-05-01 00:00:00", 0.0, "B", "2026-1"), + ] + + for date_time, demand, subregion, half_year in demand_data_to_write: + demand_data = pd.DataFrame({"Datetime": [date_time], "Value": [demand]}) + demand_data["Datetime"] = pd.to_datetime(demand_data["Datetime"]) + file_meta_data = { + "subregion": 
subregion, + "scenario": "Step Change", + "reference_year": 2018, + "poe": "POE50", + "demand_type": "OPSO_MODELLING", + "hy": half_year, + } + file_path = Path( + traces_dir / "demand" / write_new_demand_filepath(file_meta_data) + ) + file_path.parent.mkdir(parents=True, exist_ok=True) + demand_data.to_parquet(file_path, index=False) + + # Define ISPyPSA input tables + + # Sub-regions table + sub_regions_csv = """ + isp_sub_region_id, nem_region_id, sub_region_reference_node, sub_region_reference_node_voltage_kv + A, A, A Reference Node, 500 + B, B, B Reference Node, 500 + """ + sub_regions = csv_str_to_df(sub_regions_csv) + + # Flow paths table + flow_paths_csv = """ + flow_path, node_from, node_to, carrier, forward_direction_mw_summer_typical, reverse_direction_mw_summer_typical + A-B, A, B, AC, 50, 50 + """ + flow_paths = csv_str_to_df(flow_paths_csv) + + # Flow path expansion costs table + flow_path_expansion_costs_csv = """ + flow_path, option, additional_network_capacity_mw, 2024_25_$/mw, 2025_26_$/mw + A-B, Opt1, 100, 1000000, 0.0 + """ + flow_path_expansion_costs = csv_str_to_df(flow_path_expansion_costs_csv) + + # ECAA Generators table (existing generators) + # At the moment Brown Coal cost is hard coded to 30 $/MWh and Liquid Fuel to + # 400 $/MWh. "__" gets converted to a space. 
+ ecaa_generators_csv = """ + generator, fuel_type, sub_region_id, maximum_capacity_mw + expensive_generator_A, Liquid__Fuel, A, 200 + cheap_generator_B, Brown__Coal, B, 200 + """ + ecaa_generators = csv_str_to_df(ecaa_generators_csv) + + # Minimal versions of other required tables + new_entrant_generators_csv = """ + generator, fuel_type, technology_type, sub_region_id + """ + new_entrant_generators = csv_str_to_df(new_entrant_generators_csv) + + # Collect all ISPyPSA tables + ispypsa_tables = { + "sub_regions": sub_regions, + "flow_paths": flow_paths, + "flow_path_expansion_costs": flow_path_expansion_costs, + "ecaa_generators": ecaa_generators, + # Add empty DataFrames for other tables + "new_entrant_generators": pd.DataFrame(), + } + + # Create a ModelConfig instance + config = ModelConfig(**config_dict) + + # Translate ISPyPSA tables to PyPSA-friendly format + pypsa_tables = create_pypsa_friendly_inputs(config, ispypsa_tables) + + # Manually create a short hardcoded snapshots so the model works with our short + # time series data. + snapshots = pd.DataFrame( + { + "investment_periods": [2025, 2026], + "snapshots": ["2025-05-01 00:00:00", "2026-05-01 00:00:00"], + } + ) + snapshots["snapshots"] = pd.to_datetime(snapshots["snapshots"]) + + # Override the longer snapshots that would have auto generated. 
+ pypsa_tables["snapshots"] = snapshots + + # l = pypsa_tables["lines"].copy() + # l["name"] = "copy" + # l = pd.concat(([pypsa_tables["lines"], l])) + # pypsa_tables["lines"] = l + + # Create timeseries data directory structure for PyPSA inputs + pypsa_timeseries_dir = pypsa_dir / "timeseries" + pypsa_timeseries_dir.mkdir(parents=True) + + # Create demand traces for the network model + create_pypsa_friendly_timeseries_inputs( + config=config, + model_phase="capacity_expansion", + ispypsa_tables=ispypsa_tables, + snapshots=snapshots, + parsed_traces_directory=traces_dir, + pypsa_friendly_timeseries_inputs_location=pypsa_timeseries_dir, + ) + + # Build the network model + network = build_pypsa_network( + pypsa_friendly_tables=pypsa_tables, + path_to_pypsa_friendly_timeseries_data=pypsa_timeseries_dir, + ) + + # Solve the optimization problem + network.optimize() + network.optimize.solve_model(solver_name=config.solver) + + # Get expansion line names from the network + expansion_lines = [line for line in network.lines.index if "_exp_" in line] + + # Extract year from line name + line_years = {line: int(line.split("_exp_")[1]) for line in expansion_lines} + + # Check line expansion results + for line, year in line_years.items(): + capacity = network.lines.loc[line, "s_nom_opt"] + if year == 2025: + # In 2025, expansion is too expensive, so it should be 0 + assert capacity == 0, f"Line expansion in 2025 should be 0, got {capacity}" + elif year == 2026: + # In 2026, expansion is cheaper, so it should be > 0 + assert capacity > 0, f"Line expansion in 2026 should be > 0, got {capacity}" + # The expansion should be approximately 50 MW (to meet the remaining demand) + assert 45 <= capacity <= 55, ( + f"Line expansion in 2026 should be ~50 MW, got {capacity} MW" + ) diff --git a/tests/test_workbook_table_cache/flow_path_augmentation_costs_step_change_and_green_energy_exports_CNSW-NNSW.csv 
b/tests/test_workbook_table_cache/flow_path_augmentation_costs_step_change_and_green_energy_exports_CNSW-NNSW.csv new file mode 100644 index 0000000..3ecdc14 --- /dev/null +++ b/tests/test_workbook_table_cache/flow_path_augmentation_costs_step_change_and_green_energy_exports_CNSW-NNSW.csv @@ -0,0 +1,6 @@ +Flow path,Option Name,2021-22,2022-23,2023-24,2024-25,2025-26,2026-27,2027-28,2028-29,2029-30,2030-31,2031-32,2032-33,2033-34,2034-35,2035-36,2036-37,2037-38,2038-39,2039-40,2040-41,2041-42,2042-43,2043-44,2044-45,2045-46,2046-47,2047-48,2048-49,2049-50,2050-51,2051-52,2052-53,2053-54 +CNSW-NNSW,CNSW-NNSW Option 1,1834410577.13,1839984585.4913588,1870490675.9237769,1901016121.5390062,1920883577.9873168,1930908055.5877812,1943401003.800204,1955223742.3465283,1966540533.8132858,1977435998.7638278,1979577429.9072478,1981758153.365226,1983978169.1377623,1986237477.2248569,1988526254.5478702,1990864147.2640808,1993241332.29485,1995657809.6401772,1998113579.3000627,2000539879.7240293,2002976003.2266355,2005412126.7292418,2007838427.1532087,2010274550.655815,2012710674.1584213,2015146797.6610277,2017582921.1636338,2020019044.6662402,2022455168.1688464,2024891291.6714528,2027327415.174059,2029763538.6766653,2032199662.1792715 +CNSW-NNSW,CNSW-NNSW Option 2,1492549482.75,1498368669.1638362,1522299684.6067889,1547240613.4523966,1564450054.2369545,1573439757.1331573,1584194116.2320602,1594242888.5867147,1603759294.408156,1614609348.358562,1616684033.8505893,1618796786.9663234,1620947607.7057645,1623136496.0689127,1625353935.1498408,1627618958.7604027,1629922049.9946716,1632263208.852647,1634642435.3343298,1636993111.0982323,1639353303.7680616,1641713496.4378908,1644064172.2017932,1646424364.8716223,1648784557.5414515,1651144750.2112808,1653504942.88111,1655865135.550939,1658225328.2207682,1660585520.8905976,1662945713.5604267,1665305906.230256,1667666098.9000852 +CNSW-NNSW,CNSW-NNSW Option 
3,2451577025.7,2463020830.0122647,2502997770.0901384,2545275893.483528,2575674869.1368957,2591488625.803442,2609967764.215165,2627209212.818076,2643522207.6386623,2666347675.2928495,2670535825.244838,2674800821.9849367,2679142665.513145,2683561355.8294635,2688037681.236864,2692610065.129402,2697259295.8100495,2701985373.2788076,2706788297.5356755,2711533586.7014604,2716298087.5642734,2721062588.427086,2725807877.592871,2730572378.455684,2735336879.3184967,2740101380.181309,2744865881.044122,2749630381.9069347,2754394882.7697477,2759159383.6325603,2763923884.495373,2768688385.3581862,2773452886.2209983 +CNSW-NNSW,CNSW-NNSW Option 4,2544205834.44,2570694957.328485,2622183967.2558355,2666780915.346864,2694522775.7770452,2707824210.660078,2721435908.9473414,2735654907.380341,2750909197.5317435,2762078944.579713,2764401889.2216697,2766767456.701094,2769175647.017986,2771626460.1723437,2774109240.454803,2776645299.284096,2779223980.9508553,2781845285.4550824,2784509212.7967763,2787141173.0103703,2789783788.9333305,2792426404.8562913,2795058365.069885,2797700980.9928455,2800343596.915806,2802986212.8387666,2805628828.7617273,2808271444.684687,2810914060.607648,2813556676.530608,2816199292.453569,2818841908.376529,2821484524.29949 +CNSW-NNSW,CNSW-NNSW Option 5,2795731374.64,2824605711.510762,2880109217.913806,2929211740.0852356,2960356667.7182274,2975470163.7745647,2990930764.300607,3006892333.8963594,3023860923.289551,3038206140.820386,3040979041.9005356,3043802821.899586,3046677480.8175387,3049603018.654393,3052566715.6804237,3055594011.355082,3058672185.948642,3061801239.4611034,3064981171.892467,3068122945.1346545,3071277438.106567,3074431931.07848,3077573704.3206663,3080728197.292579,3083882690.2644916,3087037183.2364044,3090191676.208317,3093346169.1802297,3096500662.152142,3099655155.124055,3102809648.0959673,3105964141.06788,3109118634.0397925 diff --git a/tests/test_workbook_table_cache/flow_path_augmentation_options_CNSW-NNSW.csv 
b/tests/test_workbook_table_cache/flow_path_augmentation_options_CNSW-NNSW.csv new file mode 100644 index 0000000..5e16304 --- /dev/null +++ b/tests/test_workbook_table_cache/flow_path_augmentation_options_CNSW-NNSW.csv @@ -0,0 +1,6 @@ +Flow path,Development path,Development Driver,Option Name,Augmentation Description,Forward direction power flow,"Notional transfer level increase (MW) Note: Same increase applies to all transfer limit conditions (Peak demand, Summer typical and Winter reference)_Forward direction","Notional transfer level increase (MW) Note: Same increase applies to all transfer limit conditions (Peak demand, Summer typical and Winter reference)_Reverse direction","Indicative cost estimate ($2023, $ million) - Note 2",Cost estimate source,Cost estimate class,Easement length in km,Lead time or Earliest in Service Date & 4,Additional REZ hosting capacity provided +CNSW-NNSW,Near the existing CNSW-NNSW corridor,"Increase thermal capacity and, voltage and transient stability limits of 330 kV lines between Liddell and Armidale. Provide access to Renewable generation in N1 and N2 REZ.",CNSW-NNSW Option 1,• New Central (Hub 5) 500/330 kV substation with 3 x 500/330/33 kV 1500 MVA transformers cut into existing line between Tamworth and Armidale. • New 500 kV DCST line between Central (Hub 5) and Bayswater with Quad Orange conductor. • 4 x 500 kV 150 MVAr line shunt reactors (in total) are required for 500kV DCST line between Central Hub 5 and Bayswater. • New 4 x 330 kV 340 MVA phase shifting transformers at Central (Hub 5).,CNSW to NNSW,3000,3000,1834,AEMO (TCD),Class 5b,225,Sep-28 (See note 10),N2: 2000 +CNSW-NNSW,Near the existing CNSW-NNSW corridor,"Increase thermal capacity and, voltage and transient stability limits of 330 kV lines between Liddell and Armidale. 
Provide access to Renewable generation in N1 and N2 REZ.",CNSW-NNSW Option 2,• Expand Northern (Hub 10) switching station to 500/330kV substation with 3 x 500/330/33kV 1500 MVA transformers and cut into the existing 330 kV lines between Armidale to Sapphire/Dumaresq • Expand Central South (Hub 1) switching station to 500/330 kV substation with 3 x 500/330/33 kV 1500 MVA transformers. • New 500 kV DCST from Central South (Hub 1) to Bayswater with Quad Orange conductor • Operate line between Central Hub 5 and Central South Hub 1 from 330 kV to 500 kV. • Operate line between Central Hub 5 and Northern Hub 10 from 330 kV to 500 kV. • 4 x 500 kV 150 MVAr line shunt reactors (in total) are required for 500 kV double-circuit line between Central South (Hub 1) and Bayswater.,• Expand Northern (Hub 10) switching station to 500/330kV substation with 3 x 500/330/33kV 1500 MVA transformers and cut into the existing 330 kV lines between Armidale to Sapphire/Dumaresq • Expand Central South (Hub 1) switching station to 500/330 kV substation with 3 x 500/330/33 kV 1500 MVA transformers. • New 500 kV DCST from Central South (Hub 1) to Bayswater with Quad Orange conductor • Operate line between Central Hub 5 and Central South Hub 1 from 330 kV to 500 kV. • Operate line between Central Hub 5 and Northern Hub 10 from 330 kV to 500 kV. • 4 x 500 kV 150 MVAr line shunt reactors (in total) are required for 500 kV double-circuit line between Central South (Hub 1) and Bayswater.,3000,3000,1493,AEMO (TCD),Class 5b,217,Long,N2: 3000 +CNSW-NNSW,Near the existing CNSW-NNSW corridor,"Increase thermal capacity and, voltage and transient stability limits of 330 kV lines between Liddell and Armidale. Provide access to Renewable generation in N1 and N2 REZ.",CNSW-NNSW Option 3,"• New Central South (Hub 1) 500/330 kV substation in New England with 3 x 500/330/33 kV 1500 MVA transformers. 
• New 330 kV Central (Hub5) switching station in New England and cut into the existing lines between Tamworth and Armidale. • New 500 kV built and initially 330 kV operated double-circuit line from Hub 5 to Hub 1 • New 500 kV double-circuit line between Hub 1 and Bayswater with Quad Orange conductor. • 4 x 500 kV 150 MVAr line shunt reactors (in total) are required for 500 kV double-circuit line between Hub 1 and Bayswater. • Rebuild portion of Line 86 from Hub 5 to Tamworth as 330 kV double-circuit line. • Rebuild Line 88 Tamworth - Muswellbrook and Line 83 Liddell - Muswellbrook as 330 kV double-circuit line. • Augment Hub 5, Tamworth, Muswellbrook and Liddell to accommodate additional lines.","• New Central South (Hub 1) 500/330 kV substation in New England with 3 x 500/330/33 kV 1500 MVA transformers. • New 330 kV Central (Hub5) switching station in New England and cut into the existing lines between Tamworth and Armidale. • New 500 kV built and initially 330 kV operated double-circuit line from Hub 5 to Hub 1 • New 500 kV double-circuit line between Hub 1 and Bayswater with Quad Orange conductor. • 4 x 500 kV 150 MVAr line shunt reactors (in total) are required for 500 kV double-circuit line between Hub 1 and Bayswater. • Rebuild portion of Line 86 from Hub 5 to Tamworth as 330 kV double-circuit line. • Rebuild Line 88 Tamworth - Muswellbrook and Line 83 Liddell - Muswellbrook as 330 kV double-circuit line. • Augment Hub 5, Tamworth, Muswellbrook and Liddell to accommodate additional lines.",3600,3600,2452,AEMO (TCD),Class 5b,225,Long,N1+N2: 3600 +CNSW-NNSW,Near the existing CNSW-NNSW corridor,"Increase thermal capacity and, voltage and transient stability limits of 330 kV lines between Liddell and Armidale. Provide access to Renewable generation in N1 and N2 REZ.",CNSW-NNSW Option 4,• 2000 MW bi-pole HVDC transmission system between locality Bayswater and locality of Hub 5. 
• A new 330 kV double-circuit line from a new substation in locality of Hub 5 to Armidale. • Reconnect both Tamworth-Armidale 330 kV lines from Armidale to a new substation in locality of Hub 5.,• 2000 MW bi-pole HVDC transmission system between locality Bayswater and locality of Hub 5. • A new 330 kV double-circuit line from a new substation in locality of Hub 5 to Armidale. • Reconnect both Tamworth-Armidale 330 kV lines from Armidale to a new substation in locality of Hub 5.,1750,2000,2544,AEMO (TCD),Class 5b,280,Long,N2: 2000 +CNSW-NNSW,West of the existing CNSW-NNSW corridor,West of the existing CNSW-NNSW corridor,CNSW-NNSW Option 5,• A 2000 MW bi-pole HVDC transmission system between locality of Wollar and locality of Boggabri. • A new 330 kV AC line between locality of Boggabri and Tamworth.,• A 2000 MW bi-pole HVDC transmission system between locality of Wollar and locality of Boggabri. • A new 330 kV AC line between locality of Boggabri and Tamworth.,1750,2000,2796,AEMO (TCD),Class 5b,350,Long,N1: 2000 diff --git a/tests/test_workbook_table_cache/rez_augmentation_costs_step_change_and_green_energy_exports_NSW.csv b/tests/test_workbook_table_cache/rez_augmentation_costs_step_change_and_green_energy_exports_NSW.csv new file mode 100644 index 0000000..c5a319f --- /dev/null +++ b/tests/test_workbook_table_cache/rez_augmentation_costs_step_change_and_green_energy_exports_NSW.csv @@ -0,0 +1,30 @@ +REZ / Constraint ID,REZ Name,Option,2021-22,2022-23,2023-24,2024-25,2025-26,2026-27,2027-28,2028-29,2029-30,2030-31,2031-32,2032-33,2033-34,2034-35,2035-36,2036-37,2037-38,2038-39,2039-40,2040-41,2041-42,2042-43,2043-44,2044-45,2045-46,2046-47,2047-48,2048-49,2049-50,2050-51,2051-52,2052-53,2053-54 +N1,North West NSW,Option 
1,4683570694.16,4703516536.587628,4775599569.90908,4855161748.758409,4914331441.04575,4944752228.921412,4980211168.8201885,5013161859.253148,5044274310.512051,5086983852.377509,5095732142.107587,5104640950.91528,5113710278.80059,5122940125.763515,5132290362.034653,5141841247.152812,5151552651.348585,5161424574.621976,5171457016.972982,5181369070.015777,5191321252.827974,5201273435.640174,5211185488.682967,5221137671.495166,5231089854.307364,5241042037.119563,5250994219.931761,5260946402.743958,5270898585.556158,5280850768.368356,5290802951.180554,5300755133.992752,5310707316.80495 +N2,New England,Option 1,370392725.98,372722679.4854966,379824215.2963512,386767380.2319919,391487462.1087017,393870708.99457115,396672750.1753738,399355739.3182176,401941997.47199464,405273274.02934086,405901299.65604,406540848.6887337,407191921.12742186,407854516.9721045,408525755.371283,409211398.02795464,409908564.09062064,410617253.5592813,411337466.4339363,412049036.7540955,412763487.9257533,413477939.0974111,414189509.4175703,414903960.5892281,415618411.76088583,416332862.93254364,417047314.10420144,417761765.2758593,418476216.44751704,419190667.61917484,419905118.79083264,420619569.96249044,421334021.13414824 +N2,New England,Option 2,1004401782,1011204959.0608695,1030510596.3865469,1051698711.1524805,1067967373.7033664,1075384724.442036,1083891439.372567,1080930713.9663014,1088835747.5478656,1098338157.6225178,1101521443.5996315,1104763138.4937568,1108063242.3048933,1111421755.0330405,1114824074.4489467,1118299405.0111167,1121833144.4902983,1125425292.8864906,1129075850.1996946,1132682600.82514,1136303953.6798382,1139925306.5345366,1143532057.159982,1147153410.0146804,1150774762.8693786,1154396115.7240767,1158017468.578775,1161638821.4334733,1165260174.2881715,1168881527.1428697,1172502879.997568,1176124232.852266,1179745585.7069643 +N2,New England,Option 
3,647203680.09,651990774.0911866,664965036.4823755,679100470.3606932,690132640.2294028,695099176.6696088,700758267.7185646,706235776.1617585,711574725.4968332,719240649.0630598,721505406.3417673,723811718.7999008,726159586.4374601,728549009.2544451,730969598.4559995,733442131.6318363,735956219.9870988,738511863.5217872,741109062.2359015,743675094.5654461,746251515.6898476,748827936.8142488,751393969.1437936,753970390.2681949,756546811.3925962,759123232.5169975,761699653.6413989,764276074.7658002,766852495.8902014,769428917.0146028,772005338.1390041,774581759.2634053,777158180.3878065 +N3,Central-West Orana,Central West Orana REZ transmission link,Anticipated project,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +N3,Central-West Orana,Option 1,243410039.19,242986106.28472227,247406498.6209051,251298062.38263893,253040851.28678718,253610745.6513225,254692821.9521056,255832786.34974557,257031912.34130797,257043196.70366573,257046109.9703669,257049076.69150293,257052096.86707386,257055170.49707967,257058284.21791163,257061464.75678724,257064698.7500977,257067986.19784302,257071327.10002327,257074627.91137734,257077942.08634013,257081256.26130292,257084557.07265696,257087871.24761972,257091185.4225825,257094499.5975453,257097813.77250808,257101127.94747087,257104442.12243366,257107756.29739648,257111070.47235924,257114384.64732203,257117698.82228482 +N3,Central-West Orana,Option 2,329782042.82,331112953.9979033,336959977.87500226,342841615.74305683,346859456.10402524,348774840.16777277,351114398.03431505,353358435.9742006,355534331.4406314,358388504.1917473,358936636.9318562,359494827.153435,360063074.85648364,360641380.04100215,361227228.33662313,361825648.4840815,362434126.11300975,363052661.2234078,363681253.81527585,364302303.29604137,364925867.1471744,365549430.99830747,366170480.4790731,366794044.3302061,367417608.18133914,368041172.0324722,368664735.88360524,369288299.7347383,369911863.58587134,370535427.43700445,371158991.28813744,371782555.1392705,372406118.99040353 
+N3,Central-West Orana,Option 3,272858295.37,274691352.47226685,280108848.34253716,285335945.4057081,288803657.66047937,290527255.91549844,292568214.39660907,294544174.2947216,296464843.77767247,299050274.0024961,299506845.8003534,299971795.0623915,300445121.7886105,300926825.9790104,301414813.26754594,301913272.3863076,302420108.96925,302935323.01637334,303458914.52767766,303976222.94084615,304495625.72006,305015028.4992737,305532336.9124423,306051739.6916561,306571142.4708699,307090545.2500836,307609948.0292975,308129350.8085113,308648753.58772504,309168156.3669389,309687559.1461526,310206961.92536634,310726364.7045802 +N4,Broken Hill,Option 1,5097859988.37,5117220290.986921,5184097635.63244,5254876296.384607,5302026055.489117,5331501877.137287,5365979582.17867,5397057624.394069,5425595998.865156,5457995363.637974,5460280959.514247,5462608492.929535,5464977963.883836,5467389372.377151,5469832234.024727,5472327517.59607,5474864738.706429,5477443897.355801,5480064993.544187,5482654636.578313,5485254763.997192,5487854891.416071,5490444534.450197,5493044661.869077,5495644789.287955,5498244916.706835,5500845044.125714,5503445171.544593,5506045298.963472,5508645426.382351,5511245553.80123,5513845681.22011,5516445808.6389885 +N4,Broken Hill,Option 2,4575560873.27,4636165564.515622,4728471231.646421,4819838797.818658,4879521590.230203,4908737325.32523,4936833673.433418,4965601447.962348,4996493508.43048,5031463204.46298,5037537249.226662,5043722744.352981,5050019689.841935,5056428085.693526,5062920069.317095,5069551365.893958,5076294112.833458,5083148310.135594,5090113957.800368,5096996017.693163,5103905940.176618,5110815862.660072,5117697922.552868,5124607845.036323,5131517767.519777,5138427690.003231,5145337612.486687,5152247534.970141,5159157457.453595,5166067379.937051,5172977302.420505,5179887224.903959,5186797147.387414 +N5,South West NSW,Option 
1,1418427595.46,1425333132.6531534,1446268799.9946914,1469948259.1294363,1487141526.8051271,1496070832.9790375,1506444526.7124524,1516145948.2089193,1525369903.3027992,1544376717.5472448,1546736519.9897628,1549139621.5596664,1551586022.2569559,1554075722.081631,1556597896.2518454,1559174194.3312917,1561793791.5381236,1564456687.8723414,1567162883.3339446,1569836604.4500086,1572521150.3479192,1575205696.2458296,1577879417.361894,1580563963.2598045,1583248509.1577146,1585933055.0556252,1588617600.9535356,1591302146.8514462,1593986692.7493567,1596671238.647267,1599355784.5451777,1602040330.4430883,1604724876.3409986 +N5,South West NSW,Option 2,382536056.11,385148834.6004999,391826524.703996,399135083.80676216,404725671.9347864,407486170.8777715,410596647.88537735,413541485.48571044,416364333.7749013,425232036.682354,426186057.5377131,427157583.3628954,428146614.15790075,429153149.92272925,430172814.414925,431214360.1193997,432273410.79369754,433349966.43781835,434444027.0517624,435524958.93833905,436610267.0673715,437695575.1964039,438776507.0829806,439861815.212013,440947123.3410455,442032431.4700779,443117739.5991103,444203047.72814274,445288355.8571752,446373663.9862077,447458972.11524004,448544280.24427253,449629588.3733049 +N5,South West NSW,Option 3,299577968.52,301430153.3962949,306774951.64868736,312397961.74899757,316595939.3309462,318684689.40297085,321078470.7037487,323342248.00547856,325504702.7839533,331709784.81673473,332390376.5567333,333083456.2185667,333789023.80223495,334507079.307738,335234500.7546172,335977532.10379,336733051.3747975,337501058.56763995,338281553.6823172,339052682.8556183,339826934.00937814,340601185.163138,341372314.33643913,342146565.4901989,342920816.64395875,343695067.7977186,344469318.9514784,345243570.1052382,346017821.25899804,346792072.4127579,347566323.5665177,348340574.72027755,349114825.8740373 +N5,South West NSW,Option 
4,416523543.37,419592161.89297605,426784213.9668964,435030044.94867826,441723036.10041445,444955476.0375468,448499979.494254,451845052.90427196,455059361.3117193,467316675.2099498,468582992.8303935,469872545.63653326,471185333.62836933,472521356.80590165,473874806.37270606,475257299.9216307,476663028.6562516,478091992.5765687,479544191.68258214,480978964.3993233,482419545.9124885,483860127.4256537,485294900.14239496,486735481.6555602,488176063.1687254,489616644.6818906,491057226.19505584,492497807.70822114,493938389.2213863,495378970.73455155,496819552.2477168,498260133.76088196,499700715.27404714 +N5,South West NSW,Option 5,1046609662.37,1052952433.550638,1071323633.6956228,1091127653.585171,1093303952.6365564,1100869971.5927286,1109393448.461902,1117383500.573282,1124988856.6268702,1146342846.3103762,1149004694.6910162,1151715384.326347,1154474915.2163687,1157283287.361081,1160128290.446811,1163034345.1009045,1165989241.0096884,1168992978.1731632,1172045556.5913289,1175061504.068476,1178089661.859296,1181117819.650116,1184133767.1272635,1187161924.9180837,1190190082.7089036,1193218240.4997237,1196246398.2905438,1199274556.081364,1202302713.8721838,1205330871.663004,1208359029.4538238,1211387187.2446442,1214415345.0354638 +SWNSW1,Secondary Transmission Limit - South West NSW,Option 1,167000000,168033762.58,170734660.18,174001675.03,176856490.5,178229713.55,179731575.18,181125039.64,182441642.04,183022788.08,183614684.67,184217441.76,184831059.33,185455537.4,186088160.82,186734359.86,187391419.39,188059339.4,188738119.9,189408755.04,190082105.3,190755455.56,191426090.69,192099440.95,192772791.21,193446141.47,194119491.73,194792841.98,195466192.24,196139542.5,196812892.76,197486243.02,198159593.27 +N6,Wagga Wagga,Option 1,Refer to Forecasted Transmission flow paths (SNSW-CNSW) Option 3 and 4 in the flow paths,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +N7,Tumut,Option 1,HumeLink,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +N8,Cooma-Monaro,Option 
1,201643435.92,203332481.09947747,206890562.86120406,211157393.32182783,214743970.03302857,216428450.63267922,218250636.54295516,219979845.51475963,221650742.66753212,225275368.0381893,226018272.1716338,226774807.5735819,227544974.24403355,228328772.18298873,229122793.5733216,229933854.04928395,230758545.7937499,231596868.80671936,232448823.08819243,233290553.9182878,234135692.56550908,234980831.21273035,235822562.0428257,236667700.69004697,237512839.33726823,238357977.98448953,239203116.63171083,240048255.2789321,240893393.92615336,241738532.57337463,242583671.2205959,243428809.86781716,244273948.5150384 +N8,Cooma-Monaro,Option 2,512954346.52,515746313.32277024,525094845.75862,535570829.7798261,544188611.3109237,548149756.2427804,552642984.5468522,545497432.6261146,549485986.34145,556155550.2711177,557911201.5018915,559699066.516716,561519145.3155916,563371437.898518,565247890.8194824,567164610.9705107,569113544.9055897,571094692.6247197,573108054.1279006,575097255.2930433,577094509.9041986,579091764.515354,581080965.6804968,583078220.2916522,585075474.9028076,587072729.513963,589069984.1251185,591067238.7362739,593064493.3474293,595061747.9585848,597059002.5697402,599056257.1808954,601053511.7920508 +N9,Hunter-Central Coast,Option 1,307063934.91,308253314.1289946,314822944.1075105,320875605.71978575,325062966.38444805,327057438.6371748,329488625.3114743,331801352.67970717,334010986.64000005,337298442.1695044,337930835.20317924,338574831.7787565,339230431.89623594,339897635.55561775,340573541.8714262,341263952.6146126,341965966.89970124,342679584.72669214,343404806.09558535,344121324.8080519,344840744.405994,345560164.00393605,346276682.7164026,346996102.31434464,347715521.91228676,348434941.5102289,349154361.10817087,349873780.706113,350593200.3040551,351312619.9019972,352032039.49993926,352751459.0978813,353470878.6958234 +N9,Hunter-Central Coast,Option 
1A,283129029.82,284209281.02826434,290165004.8449576,295750949.6597992,299736762.10025394,301631460.1057646,303919442.1178005,306085986.5372956,308148964.04899776,311482912.3828618,312120640.25105953,312770069.54803157,313431200.27377784,314104032.4282984,314785640.6543997,315481875.6664688,316189812.10731214,316909449.9769298,317640789.2753217,318363352.5021329,319088841.08613765,319814329.6701424,320536892.8969537,321262381.4809584,321987870.0649632,322713358.64896804,323438847.23297274,324164335.81697756,324889824.4009823,325615312.98498714,326340801.56899184,327066290.15299666,327791778.7370014 +N9,Hunter-Central Coast,Option 1AB,274506774.68,275601709.74302685,281315034.7185488,286692285.03282773,290389108.8418773,292116926.81184053,294229969.8447924,296255229.2790906,298207740.2736086,301068367.8869726,301626733.2060974,302195343.7604355,302774199.5499869,303363300.5747517,303960085.5259263,304569677.02111757,305189513.75152224,305819595.71714014,306459922.9179713,307092566.1923926,307727770.7756171,308362975.35884166,308995618.6332629,309630823.21648747,310266027.79971206,310901232.38293654,311536436.96616113,312171641.5493857,312806846.1326102,313442050.7158348,314077255.29905933,314712459.8822839,315347664.4655084 +N9,Hunter-Central Coast,Option 1B,298259423,299459028.0639648,305779139.5944096,311618137.04806894,315516202.714833,317343887.4857389,319600475.1325514,321771800.0293708,323870247.4076546,326696260.5142928,327250320.8393112,327814547.40882534,328388940.22283524,328973499.28134096,329565683.0232184,330170574.57071555,330785632.3627085,331410856.3991972,332046246.68018156,332674012.2777942,333304319.4365307,333934626.5952673,334562392.19287986,335192699.35161644,335823006.51035297,336453313.6690895,337083620.8278261,337713927.9865626,338344235.14529914,338974542.3040357,339604849.4627722,340235156.6215088,340865463.7802453 +N9,Hunter-Central Coast,Option 
2,58512971.63,59264516.47234721,60463128.646335594,61586003.06832661,62132057.98194883,62424149.08457694,62768378.68158464,63134260.82154819,63517191.71301671,63564126.00740309,63573163.051127784,63582365.912168525,63591734.590525314,63601269.08619815,63610927.94485802,63620794.07516296,63630826.02278395,63641023.78772098,63651387.36997407,63661626.58924012,63671907.26283518,63682187.93643024,63692427.15569628,63702707.82929134,63712988.5028864,63723269.17648146,63733549.85007653,63743830.52367159,63754111.197266646,63764391.87086171,63774672.54445677,63784953.21805183,63795233.89164689 +N9,Hunter-Central Coast,Option 2A,106104649.48,106707980.56588137,108923877.36792059,110857310.84005766,111741595.0292219,112127710.41144843,112683338.61457375,113276559.8963809,110568873.62265828,110596377.28538355,110605383.60410805,110614555.17638712,110623892.00222076,110633394.081609,110643020.10116316,110652852.68766055,110662850.52771251,110673013.62131906,110683341.96848017,110693546.37547536,110703792.0958592,110714037.81624302,110724242.22323821,110734487.94362204,110744733.66400586,110754979.3843897,110765225.10477354,110775470.82515736,110785716.54554118,110795962.26592502,110806207.98630886,110816453.70669268,110826699.4270765 +N10,Hunter Coast,Option 1,Refer to Forecasted Transmission flow paths CNSW-SNW Option 1,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +N11,Illawarra Coast,Option 1,Refer to N12 forecasted costs,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +N12,Illawarra,Option 
1,813815319.22,817811736.6265252,832369006.0703616,848481785.9113315,860767680.5083158,866382557.1392083,872976740.2219964,879280385.8855536,885371634.1700364,892369728.4992911,894675154.5802765,897022882.0572433,899412910.9301914,901845241.199121,904309297.5150366,906826230.5759289,909385465.0328026,911987000.8856577,914630838.1344941,917242949.3363446,919865635.8871903,922488322.4380361,925100433.6398866,927723120.1907322,930345806.7415781,932968493.2924238,935591179.8432697,938213866.3941154,940836552.9449612,943459239.4958069,946081926.0466528,948704612.5974984,951327299.1483443 diff --git a/tests/test_workbook_table_cache/rez_augmentation_options_NSW.csv b/tests/test_workbook_table_cache/rez_augmentation_options_NSW.csv new file mode 100644 index 0000000..96799bc --- /dev/null +++ b/tests/test_workbook_table_cache/rez_augmentation_options_NSW.csv @@ -0,0 +1,30 @@ +REZ / Constraint ID,REZ Name,Option,Description,Additional network capacity (MW),Expected cost ($ million),Estimate source,Cost estimate class,$M/MW,Easement Length (km),Lead time,System strength connection costs ($M/MW) +N1,North West NSW,Option 1,"• Two new 500 kV circuits from Orana REZ to locality of Gilgandra to locality of Boggabri to locality of Moree. • A new single 500 kV circuit from Orana REZ to Wollar. • New 500/330 kV substations in locality of Boggabri and Moree. • A new 500 kV switching station in locality of Gilgandra. • A new 330 kV single-circuit from Sapphire to locality of Moree. • A new 330 kV circuit from Tamworth to locality of Boggabri. • Line shunt reactors at both ends of Orana REZ-locality of Gilgandra, locality of Gilgandra-locality of Boggabri, locality of Boggabri-locality of Moree 500 kV circuits.",1660.0,4684,AEMO TCD,Class 5b,2.8216867469879516,810.0,Long,0.137 +N2,New England,Option 1,"• New 330 kV Northern (Hub 10), Central South (Hub 1) and East (Hub 4) switching stations. 
• New 500 kV built and initially 330 kV operated DCST line from Central (Hub 5) to Central South (Hub1) with Quad Orange conductor. • New 500 kV built and initially 330 kV operated DCST line from Central (Hub 5) to Northern (Hub 10) with Quad Orange conductor. • New 330 kV DCST line between Central (Hub 5) and East (Hub 4) with Twin Olive conductor.",1000.0,370,AEMO TCD,Class 5b,0.37,60.0,Medium,0.137 +N2,New England,Option 2,"• New North switching station and cuts into Sapphire - Armidale and Dumaresq - Armidale line. • New 500 kV built and initially 330 kV operated double-circuit line from North switching station to Hub 5. • Augment Hub 5 with one additional 500/330 kV transformer. • New 500 kV double-circuit line, strung on one side between Hub 5 to Hub 1. • New 330 kV DCST line from Hub 8 to Hub 5. • New Hub 8 switching station. (Pre-requisite: CNSW-NNSW Option 3)",1500.0,1004,AEMO TCD,Class 5b,0.6693333333333333,140.0,Long, +N2,New England,Option 3,• New Hub 9 switching station. • Establish a new Lower Creek 330/132 kV substation with 1 x 330/132 kV 375 MVA transformer. • Rebuild part of Line 965 as 330 kV double-circuit from Armidale to Lower Creek. • Relocate existing 132 kV 200 MVA phase shift transformer on Line 965 from Armidale to Lower Creek. • New 330 kV double-circuit from Lower Creek to Hub 9. • Cut-in of Line 965 at new Lower Creek substation,900.0,647,AEMO TCD,Class 5b,0.7188888888888889,20.0,Medium, +N3,Central-West Orana,Central West Orana REZ transmission link,"• New Merotherie 500/330 kV substation with 4 x 500/330/33 kV 1500 MVA transformers. • New 330 kV Uarbry East, Uarbry West, Elong Elong switching stations. • New 500 kV Wollar switching station. • 2 x 500 kV double-circuit line from Wollar to Merotherie. • 330 kV double-circuit line from Merotherie to Uarbry East. • 330 kV double-circuit from Merotherie to Uarbry West. • 2 x 500 kV double-circuit and initially operated at 330 kV from Merotherie to Elong Elong. 
• 5 x 100 MVAr synchronous condensers at Elong Elong switching station. • 5 x 100 MVAr synchronous condensers at Merotherie substation. • Provision of switchbays for future generator connections. • An additional 330 kV single-circuit line from Bayswater to Liddell. • An additional 330 kV single-circuit line from Mt Piper to Wallerawang. • New 330 kV Uungula switching station and cut into Line 79 Wellington – Wollar • 1 x 330 kV DCST from Elong Elong to Uungula with Twin Olive conductor • 2 x 100 MVAr synchronous condensers at Uarbry West switching station • 3 x 100 MVAr synchronous condensers at Uarbry East switching station Note: Hunter Transmission Project is a pre-requisite for allowing network transfer greater than 3 GW.",4500.0,This project is considered to be anticipated and so is not included as an option here. The scope of the project is listed here for context so that the subsequent options can be understood. Option 1 includes expansions and augmentations to this project.,,,,,,Included as part of network build +N3,Central-West Orana,Option 1,• Expand Elong Elong substation with 3 x 500/330/33 kV 1500MVA transformers • Operate 4 circuits between Elong Elong and Merotherie to 500 kV (Pre-requisite: CWO REZ transmission link project) Note: Hunter Transmission Project will be required to get up to 6 GW total network capacity as pre-requisite. Note: Hunter Transmission Project is a pre-requisite for allowing network transfer greater than 3 GW. 
Note: 3 x 1500 MVA transformers provide for 3 GW at Elong Elong however REZ network capacity is limited to 6 GW,1500.0,243,AEMO TCD,Class 5b,0.162,,Medium, +N3,Central-West Orana,Option 2,• New 330 kV Stubbo switching station and cuts into Wellington - Wollar • New 330 kV single-circuit line between Wollar and Stubbo • Expand Wollar substation with 330 kV busbar and 1 x 500/300/33 kV 1500 MVA transformer,500.0,330,AEMO TCD,Class 5b,0.66,55.0,Medium, +N3,Central-West Orana,Option 3,• New 330 kV Burrendong switching station and cuts into Line Wellington - Mt Piper • New Uungula switching station and cuts into Wollar - Wellington • New 330 kV double-circuit line from Burrendong switching station to Uungula,500.0,273,AEMO TCD,Class 5b,0.546,45.0,Medium, +N4,Broken Hill,Option 1,• 500 kV double-circuit line from Bannaby – Broken Hill (>850 km). • Two mid-point switching stations and reactive plant.,1750.0,5098,AEMO TCD,Class 5b,2.9131428571428573,849.0,Long,0.137 +N4,Broken Hill,Option 2,500 kV double-circuit HVDC line from Bannaby – Broken Hill (>850 km). 
• New HVDC converter stationss at Bannaby and Broken Hill,1750.0,4576,AEMO TCD,Class 5b,2.6148571428571428,850.0,Long, +N5,South West NSW,Option 1,"• Expand Dinawan 330 kV switching station to 500/330 kV substation with 3 x 500/330/33 kV, 1500 MVA transformers • Operate 500 kV build and 330 kV operated double-circuit line from Dinawan to Wagga to 500 kV (Pre-requisite: EnergyConnect and HumeLink)",2500.0,1418,AEMO TCD,Class 5b,0.5672,0.0,Long,0.137 +N5,South West NSW,Option 2,• New Conargo 330 kV switching station • New 330 kV double-circuit line from Conargo to Dinawan (Pre-requisite: Dinawan - Wagga 500 kV upgrade),800.0,383,AEMO TCD,Class 5b,0.47875,69.0,Long, +N5,South West NSW,Option 3,• New Marbins Well 330 kV switching station • New 330 kV DCST line from Mabins Well to Dinawan (Pre-requisite: Dinawan - Wagga 500 kV upgrade),1400.0,300,AEMO TCD,Class 5b,0.21428571428571427,50.0,Long, +N5,South West NSW,Option 4,• New The Plains 330 kV switching station • New 330 kV double-circuit line and strung on one side from The Plains to Dinawan (Pre-requisite: South West REZ Option 1),1400.0,417,AEMO TCD,Class 5b,0.2978571428571429,88.0,Long, +N5,South West NSW,Option 5,• New Hays Plain 330 kV switching station • New Abercrombie 330 kV switching station • New 330 kV double-circuit line from Hays Plain to Abercrombie • New 330 kV double-circuit line from Abercrombie to The Plain • String the other side of 330 kV line from The Plain to Dinawan (Pre-requisite: South West REZ Option 4),1400.0,1047,AEMO TCD,Class 5b,0.7478571428571429,280.0,Long, +SWNSW1,Secondary Transmission Limit - South West NSW,Option 1,"• Establish a new Darlington Point to Dinawan 330 kV transmission line, post Project EnergyConnect (Pre-requisite: Project EnergyConnect and HumeLink)",600.0,167,Transgrid,Class 5a,0.2783333333333333,90.0,Short, +N6,Wagga Wagga,Option 1,Refer to SNSW-CNSW Option 3 and 4 subregional augmentations,,,,,,,,0.137 +N7,Tumut,Option 1,Refer to SNSW-CNSW Option 1 subregional 
augmentations,,,,,,,,Included as connection cost +N8,Cooma-Monaro,Option 1,• 132 kV single-circuit Williamsdale to Cooma-Monaro substation (located near generation interest),150.0,202,AEMO TCD,Class 5b,1.3466666666666667,81.0,Medium,0.137 +N8,Cooma-Monaro,Option 2,• 330 kV line Cooma-Williamdale-Stockdill • Two 330/132 kV transformers at Cooma,500.0,512,AEMO TCD,Class 5b,1.024,126.0,Medium, +N9,Hunter-Central Coast,Option 1,• Rebuild the existing Line 83 Liddell - Muswellbrook as 330 kV double-circuit line • 1 x 330 kV double-circuit from East Hub to Muswellbrook • 1 x 330 kV double-circuit from West Hub to Muswellbrook,950.0,307,AEMO TCD,Class 5b,0.3231578947368421,39.0,Medium,0.137 +N9,Hunter-Central Coast,Option 1A,"• Install a new 330 kV circuit between Liddell and Muswellbrook, Twin Olive conductor • 1 x 330 kV DCST from East Hub to Muswellbrook conductor • 1 x 330 kV DCST from West Hub to Muswellbrook",950.0,283,AEMO TCD,Class 5b,0.29789473684210527,57.0,Medium, +N9,Hunter-Central Coast,Option 1AB,• Install a new 330 kV circuit between Liddell and Muswellbrook • 1 x 330 kV DCST from East Muswellbrook Hub to Muswellbrook • Build 330/132 kV 375 MVA transformer at West Muswellbrook Hub • 1 x 132 kV DCST from West Muswellbrook Hub to Muswellbrook,850.0,274,AEMO TCD,Class 5b,0.32235294117647056,57.0,Medium, +N9,Hunter-Central Coast,Option 1B,• Rebuild the existing Line 83 Liddell - Muswellbrook as 330 kV double-circuit line • 1 x 330 kV DCST from East Muswellbrook Hub to Muswellbrook • Build 330/132 kV 375MVA transformer at West Muswellbrook Hub • 1 x 132 kV DCST from West Muswellbrook Hub to Muswellbrook,850.0,298,AEMO TCD,Class 5b,0.35058823529411764,39.0,Medium, +N9,Hunter-Central Coast,Option 2,• New 330 kV Singleton switching station and cuts into line 82 Liddell - Tomago,500.0,59,AEMO TCD,Class 5b,0.118,,Short, +N9,Hunter-Central Coast,Option 2A,• New 330/132 kV 375 MVA Singleton two transformer substation and cuts into line 82 Liddell - Tomago and connected 
to Ausgrid's Singleton 132 kV substation switching station,375.0,106,AEMO TCD,Class 5b,0.2826666666666667,,Medium, +N10,Hunter Coast,Option 1,Refer to CNSW-SNW Option 1 subregional augmentations,,,,,,,,0.137 +N11,Illawarra Coast,Option 1,• 500 kV double-circuit line from Dapto – Bannaby. • Two 500/330 kV 1500 MVA transformers at Dapto. (Pre-requisite: CNSW – SNW Option 2),2000.0,814,AEMO TCD,Class 5b,0.407,100.0,Long,0.137 +N12,Illawarra,Option 1,• 500 kV double-circuit line from Dapto – Bannaby. • Two 500/330 kV 1500 MVA transformers at Dapto. (Pre-requisite: CNSW – SNW Option 2),2000.0,814,AEMO TCD,Class 5b,0.407,100.0,Long,0.137 diff --git a/tests/test_workbook_table_cache/transmission_expansion_costs.csv b/tests/test_workbook_table_cache/transmission_expansion_costs.csv index 72fa8de..e4bd085 100644 --- a/tests/test_workbook_table_cache/transmission_expansion_costs.csv +++ b/tests/test_workbook_table_cache/transmission_expansion_costs.csv @@ -1,4 +1,4 @@ -,flow_path_name,indicative_transmission_expansion_cost_$/mw +,flow_path,indicative_transmission_expansion_cost_$/mw 0,CQ-NQ,1.126363636 1,CQ-GG,0.838709677 2,SQ-CQ,0.513333333 diff --git a/uv.lock b/uv.lock index 7dd8b93..fcc0529 100644 --- a/uv.lock +++ b/uv.lock @@ -959,7 +959,7 @@ wheels = [ [[package]] name = "isp-trace-parser" -version = "1.0.1" +version = "1.0.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "joblib" }, @@ -967,14 +967,14 @@ dependencies = [ { name = "polars" }, { name = "pydantic" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/7c/ee/bb1766136177990809f922b31d157aa64276de2bd39d52dc5396d03869d1/isp_trace_parser-1.0.1.tar.gz", hash = "sha256:24b2bfea350bc7d9cf914cf37aca94ae2ae8d6a30e4c827bf2421f83440613c7", size = 47637 } +sdist = { url = "https://files.pythonhosted.org/packages/47/e7/31602674cebf1b951f113d0c185a00499a1d387ffdda4cf6e2e5a34c2004/isp_trace_parser-1.0.3.tar.gz", hash = 
"sha256:8862ac7bfe41ac7d3b7ac05e5f4f9fb9ce21689e48b5c73a1ca4b8f4b503f395", size = 47948 } wheels = [ - { url = "https://files.pythonhosted.org/packages/32/db/5275985f5305975443f58363307a0283b3549fdeec53b99827148d803f6a/isp_trace_parser-1.0.1-py3-none-any.whl", hash = "sha256:f7c59e0b449017454e9821d87ca80393e8599a9072dc1480a7d0fea93581ead3", size = 44849 }, + { url = "https://files.pythonhosted.org/packages/30/5a/74b90f1aaeec4304e9a80d310fb2ba247b960489b84463dee45f18fbfc8d/isp_trace_parser-1.0.3-py3-none-any.whl", hash = "sha256:a17bebd07edf6edd9c31b27dff361dfc64efbfd44d0f9e61c8642089ba461b1a", size = 45111 }, ] [[package]] name = "isp-workbook-parser" -version = "2.4.1" +version = "2.5.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "openpyxl" }, @@ -983,9 +983,9 @@ dependencies = [ { name = "pyyaml" }, { name = "thefuzz" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b4/b3/c82c654fdfa843b00e08609492204d68435a125138e8294ebaf51d5ef245/isp_workbook_parser-2.4.1.tar.gz", hash = "sha256:61d6577d90ff804ef662368d77a1c1978ab7456aae863f71478c55f20f1621d6", size = 52394 } +sdist = { url = "https://files.pythonhosted.org/packages/99/4a/33590f8f545b468020f28a2a2bb8071d5979e9ffa37dc2ed57b1502f50c6/isp_workbook_parser-2.5.0.tar.gz", hash = "sha256:958e5fa1115a1c5dee1a8d2f14a825f5a86133d3c0bd46cc4a465007ac954d8b", size = 53786 } wheels = [ - { url = "https://files.pythonhosted.org/packages/8e/0a/ce602e2f1a45dc45bcb266c43fcf9901eaeeb182f24cb15e3ff2774b0f0f/isp_workbook_parser-2.4.1-py3-none-any.whl", hash = "sha256:65731de03f29af97ead30a903d0fa3e45a5e9862358c82ea21e6ed807ea444df", size = 84820 }, + { url = "https://files.pythonhosted.org/packages/02/eb/bb3b6c8831f9e2dca80bc4b4e5163fa7ae1500c547d8794391064ba0a0be/isp_workbook_parser-2.5.0-py3-none-any.whl", hash = "sha256:d03b788ad305f93fb04f74924985e9325ba099498b99f24339c429dfd662df7c", size = 86361 }, ] [[package]] @@ -1031,8 +1031,8 @@ dev = [ [package.metadata] requires-dist = [ { 
name = "doit", specifier = ">=0.36.0" }, - { name = "isp-trace-parser", specifier = ">=1.0.0" }, - { name = "isp-workbook-parser", specifier = ">=2.4.1" }, + { name = "isp-trace-parser", specifier = ">=1.0.3" }, + { name = "isp-workbook-parser", specifier = ">=2.5.0" }, { name = "linopy", marker = "extra == 'solvers'", specifier = ">=0.4.4" }, { name = "pandas", specifier = ">=2.2.2" }, { name = "pyarrow", specifier = ">=18.0.0" },