diff --git a/.gitignore b/.gitignore index 7a697e3..dc6a0db 100644 --- a/.gitignore +++ b/.gitignore @@ -173,6 +173,3 @@ ispypsa_runs/**/*.hdf5 # ignore doit database .doit* - -.repomixignore -repomix.config.json diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..643049b --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,335 @@ +# Claude Coding Preferences for ISPyPSA + +This document captures coding preferences and patterns learned from working on the ISPyPSA project. + +## Testing Preferences + +### Test Structure +- Prefer comparing results to hardcoded DataFrames rather than using assert statements to check general properties +- Use the `csv_str_to_df` fixture to create test data in a readable format +- Sort DataFrames before comparison to ensure consistent ordering +- Test one thing at a time - implement and run tests individually before moving to the next + +### Using csv_str_to_df Fixture + +The `csv_str_to_df` fixture is a pytest fixture that converts CSV-formatted strings into pandas DataFrames. This makes tests more readable and maintainable. + +#### Basic Usage +```python +def test_my_function(csv_str_to_df): + # Create input data + input_data_csv = """ + name, value, active + item1, 100, True + item2, 200, False + """ + input_data = csv_str_to_df(input_data_csv) + + # Create expected output + expected_output_csv = """ + name, processed_value + item1, 150 + item2, 250 + """ + expected_output = csv_str_to_df(expected_output_csv) + + # Call function and compare + result = my_function(input_data) + pd.testing.assert_frame_equal(result, expected_output) +``` + +#### Important Notes on CSV Formatting +- **Whitespace**: The fixture handles whitespace around commas, making the CSV more readable +- **Data Types**: The fixture infers data types (integers, floats, booleans, strings) +- **Special Values**: Use `NaN` for missing values, `inf` for infinity +- **Column Alignment**: Align columns for better readability (optional but recommended) + +#### Complete Test Example +```python +def test_translate_custom_constraints_with_tables_no_rez_expansion(csv_str_to_df): + """Test translation of custom constraints when tables are present but REZ transmission expansion is disabled.""" + + # Input: REZ group constraints RHS + rez_group_constraints_rhs_csv = """ + constraint_id, summer_typical + REZ_NSW, 5000 + REZ_VIC, 3000 + """ + + # Input: REZ group constraints LHS + rez_group_constraints_lhs_csv = """ + constraint_id, term_type, variable_name, coefficient + REZ_NSW, generator_capacity, GEN1, 1.0 + REZ_NSW, generator_capacity, GEN2, 1.0 + REZ_VIC, generator_capacity, GEN3, 1.0 + """ + + # Input: Links DataFrame + links_csv = """ + isp_name, name, carrier, bus0, bus1, p_nom, p_nom_extendable + PathA-PathB, PathA-PathB_existing, AC, NodeA, NodeB, 1000, False + """ + + # Convert CSV strings to DataFrames + ispypsa_tables = { + "rez_group_constraints_rhs": csv_str_to_df(rez_group_constraints_rhs_csv), + "rez_group_constraints_lhs": csv_str_to_df(rez_group_constraints_lhs_csv), + } + links = csv_str_to_df(links_csv) + + # Mock configuration + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # Call the function under test + result = _translate_custom_constraints(config, ispypsa_tables, links) + + # Expected RHS result + expected_rhs_csv = """ + constraint_name, rhs + REZ_NSW, 5000 + REZ_VIC, 3000 + """ + expected_rhs = csv_str_to_df(expected_rhs_csv) + + # Expected LHS result - note the column 
order matches the actual output + expected_lhs_csv = """ + constraint_name, variable_name, coefficient, component, attribute + REZ_NSW, GEN1, 1.0, Generator, p_nom + REZ_NSW, GEN2, 1.0, Generator, p_nom + REZ_VIC, GEN3, 1.0, Generator, p_nom + """ + expected_lhs = csv_str_to_df(expected_lhs_csv) + + # Assert results are as expected + assert "custom_constraints_rhs" in result + assert "custom_constraints_lhs" in result + + # Compare DataFrames with sorting to handle row order differences + pd.testing.assert_frame_equal( + result["custom_constraints_rhs"] + .sort_values("constraint_name") + .reset_index(drop=True), + expected_rhs.sort_values("constraint_name").reset_index(drop=True) + ) + + pd.testing.assert_frame_equal( + result["custom_constraints_lhs"] + .sort_values(["constraint_name", "variable_name"]) + .reset_index(drop=True), + expected_lhs.sort_values(["constraint_name", "variable_name"]) + .reset_index(drop=True) + ) +``` + +### Test Writing Best Practices + +1. **Column Order Matters**: Pay attention to the actual column order in the output. Run the test first to see the actual order, then adjust the expected CSV to match. + +2. **Sorting for Comparison**: When row order doesn't matter, sort both DataFrames before comparison: + ```python + pd.testing.assert_frame_equal( + actual.sort_values(["col1", "col2"]).reset_index(drop=True), + expected.sort_values(["col1", "col2"]).reset_index(drop=True) + ) + ``` + +3. **Handling Special Cases**: + - For DataFrames with NaN values, use `check_dtype=False` if type precision isn't critical + - For floating point comparisons, consider using `check_exact=False` or `rtol=1e-5` + - For columns that are calculated (like capital_cost), exclude them from comparison: + ```python + actual_to_compare = actual.drop(columns=["capital_cost"]) + ``` + +4. **Empty DataFrame Testing**: + ```python + # Test empty input returns empty output + result = my_function(pd.DataFrame()) + assert result.empty + ``` + +## Code Organization + +### Function Design +- Prefer small, focused functions with single responsibilities +- Extract complex workflows into independent subfunctions that can be tested separately +- Functions should return data (e.g., DataFrames) rather than modifying state +- Each function should handle its own edge cases (None inputs, empty DataFrames) + +### Refactoring Patterns +- When a function has multiple independent workflows, break it into: + 1. Separate functions for each workflow + 2. A main orchestration function that calls the subfunctions +- Move validation logic (like empty checks) into the lowest appropriate level + +### Example Refactoring Pattern +```python +# Before: Monolithic function with multiple responsibilities +def complex_function(inputs): + # Workflow 1 + # ... lots of code ... + + # Workflow 2 + # ... lots of code ... + + return results + +# After: Separated concerns +def _process_workflow_1(inputs): + # Handle edge cases + if inputs is None: + return pd.DataFrame() + # ... focused code ... + return result + +def _process_workflow_2(inputs): + # ... focused code ... 
+ return result + +def complex_function(inputs): + result1 = _process_workflow_1(inputs) + result2 = _process_workflow_2(inputs) + return combine_results(result1, result2) +``` + +## Development Workflow + +### Version Control +- Be cautious about committing changes - only commit when explicitly requested +- Use descriptive git messages that focus on the "why" rather than the "what" + +### Environment Setup +- Use `uv` for Python package management +- Prefer `uv sync` over `uv pip install -e .` when a lock file exists +- Create separate virtual environments for different platforms (e.g., `.venv-wsl` for WSL) + +### Using uv for Development + +#### Initial Setup (WSL/Linux) +```bash +# Install uv if not already installed +curl -LsSf https://astral.sh/uv/install.sh | sh + +# Source the uv environment +source $HOME/.local/bin/env + +# Create a virtual environment (use different names for different platforms) +uv venv .venv-wsl # For WSL +# or +uv venv .venv # For native Linux/Mac + +# Install dependencies from lock file +uv sync + +# Or if you need to specify the venv location +UV_PROJECT_ENVIRONMENT=.venv-wsl uv sync +``` + +#### Running Tests with uv +```bash +# Basic test execution +source $HOME/.local/bin/env && uv run pytest tests/ + +# Run a specific test file +source $HOME/.local/bin/env && uv run pytest tests/test_translator/test_translate_custom_constraints.py + +# Run a specific test function with verbose output +source $HOME/.local/bin/env && uv run pytest tests/test_translator/test_translate_custom_constraints.py::test_translate_custom_constraints_no_tables_no_links -v + +# Run tests matching a pattern +source $HOME/.local/bin/env && uv run pytest tests/ -k "custom_constraint" -v + +# With a specific virtual environment +source $HOME/.local/bin/env && UV_PROJECT_ENVIRONMENT=.venv-wsl uv run pytest tests/test_translator/test_translate_custom_constraints.py -v +``` + +#### Running Python Scripts with uv +```bash +# Run a Python script +source $HOME/.local/bin/env && uv run python example_workflow.py + +# Run a module +source $HOME/.local/bin/env && uv run python -m ispypsa.model.build + +# Interactive Python shell with project dependencies +source $HOME/.local/bin/env && uv run python + +# Run with specific virtual environment +source $HOME/.local/bin/env && UV_PROJECT_ENVIRONMENT=.venv-wsl uv run python example_workflow.py +``` + +#### Common Workflow Commands +```bash +# Check which packages are installed +source $HOME/.local/bin/env && uv pip list + +# Add a new dependency (this updates pyproject.toml and uv.lock) +source $HOME/.local/bin/env && uv add pandas + +# Add a development dependency +source $HOME/.local/bin/env && uv add --dev pytest-mock + +# Update dependencies +source $HOME/.local/bin/env && uv sync --upgrade + +# Run pre-commit hooks +source $HOME/.local/bin/env && uv run pre-commit run --all-files +``` + +#### Troubleshooting +```bash +# If you get "Project virtual environment directory cannot be used" error +rm -rf .venv +source $HOME/.local/bin/env && uv sync + +# To explicitly set UV_LINK_MODE if you see hardlink warnings +export UV_LINK_MODE=copy +source $HOME/.local/bin/env && uv sync +``` + +#### Best Practices +1. Always source the uv environment before running commands: `source $HOME/.local/bin/env` +2. Use `UV_PROJECT_ENVIRONMENT` when you have multiple virtual environments +3. Run `uv sync` after pulling changes that might have updated dependencies +4. 
Use `uv run` prefix for all Python-related commands to ensure the correct environment is used + +### Testing Workflow +1. Implement the test with hardcoded expected results +2. Run the test to see if it passes +3. Fix any issues (like column ordering) based on actual results +4. Verify the test passes before moving to the next one + +## Code Style + +### DataFrame Operations +- Be explicit about column ordering in tests +- Use pandas testing utilities for DataFrame comparisons: +```python +pd.testing.assert_frame_equal( + actual.sort_values("key").reset_index(drop=True), + expected.sort_values("key").reset_index(drop=True) +) +``` + +### Function Naming +- Use descriptive names that indicate the function's purpose +- Private functions should start with underscore +- Use consistent naming patterns (e.g., `_process_*`, `_create_*`, `_translate_*`) + +## Communication Preferences + +### Progress Updates +- Work on one task at a time and show results before moving to the next +- Explain the reasoning behind refactoring suggestions +- Provide clear summaries of what was accomplished + +### Problem Solving +- When tests fail, show the error and fix it step by step +- Consider alternative approaches (like refactoring) to simplify complex testing scenarios +- Ask for clarification when there are multiple possible approaches diff --git a/example_workflow.py b/example_workflow.py index c9f2ce2..937609c 100644 --- a/example_workflow.py +++ b/example_workflow.py @@ -100,7 +100,6 @@ operational_timeseries_location, ) - network.optimize.fix_optimal_capacities() # Never use network.optimize() as this will remove custom constraints. diff --git a/pyproject.toml b/pyproject.toml index c03a857..33fd641 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,14 +10,14 @@ authors = [ dependencies = [ "pandas>=2.2.2", "pypsa>=0.31.1", - "isp-workbook-parser>=2.4.1", "pyyaml>=6.0.2", "doit>=0.36.0", "xmltodict>=0.13.0", "thefuzz>=0.22.1", - "isp-trace-parser>=1.0.0", "pyarrow>=18.0.0", "tables>=3.10.1", + "isp-trace-parser>=1.0.3", + "isp-workbook-parser>=2.6.0", ] readme = "README.md" requires-python = ">= 3.10" diff --git a/src/ispypsa/iasr_table_caching/local_cache.py b/src/ispypsa/iasr_table_caching/local_cache.py index 359aab7..952aede 100644 --- a/src/ispypsa/iasr_table_caching/local_cache.py +++ b/src/ispypsa/iasr_table_caching/local_cache.py @@ -2,7 +2,16 @@ from isp_workbook_parser import Parser -from ..templater.mappings import _GENERATOR_PROPERTIES +from ..templater.mappings import ( + _ACTIONABLE_ISP_PROJECTS_TABLES, + _FLOW_PATH_AGUMENTATION_TABLES, + _FLOW_PATH_AUGMENTATION_COST_TABLES, + _GENERATOR_PROPERTIES, + _PREPATORY_ACTIVITIES_TABLES, + _REZ_AUGMENTATION_COST_TABLES, + _REZ_CONNECTION_AGUMENTATION_TABLES, + _REZ_CONNECTION_PREPATORY_ACTIVITIES_TABLES, +) _GENERATOR_PROPERTY_TABLES = [ table_name @@ -34,6 +43,17 @@ "initial_build_limits", ] +_NETWORK_REQUIRED_TABLES = ( + _NETWORK_REQUIRED_TABLES + + _FLOW_PATH_AGUMENTATION_TABLES + + _FLOW_PATH_AUGMENTATION_COST_TABLES + + _PREPATORY_ACTIVITIES_TABLES + + _ACTIONABLE_ISP_PROJECTS_TABLES + + _REZ_CONNECTION_AGUMENTATION_TABLES + + _REZ_AUGMENTATION_COST_TABLES + + _REZ_CONNECTION_PREPATORY_ACTIVITIES_TABLES +) + _GENERATORS_STORAGE_REQUIRED_SUMMARY_TABLES = [ "existing_generators_summary", "committed_generators_summary", diff --git a/src/ispypsa/model/build.py b/src/ispypsa/model/build.py index 18ce04a..a983424 100644 --- a/src/ispypsa/model/build.py +++ b/src/ispypsa/model/build.py @@ -11,7 +11,7 @@ ) from ispypsa.model.initialise import 
_initialise_network
 from ispypsa.model.investment_period_weights import _add_investment_period_weights
-from ispypsa.model.lines import _add_lines_to_network
+from ispypsa.model.links import _add_links_to_network
 
 
 def build_pypsa_network(
@@ -60,14 +60,8 @@ def build_pypsa_network(
         network, pypsa_friendly_tables["buses"], path_to_pypsa_friendly_timeseries_data
     )
 
-    if not pypsa_friendly_tables["custom_constraints_generators"].empty:
-        _add_bus_for_custom_constraints(network)
-
-    _add_lines_to_network(network, pypsa_friendly_tables["lines"])
-
-    _add_custom_constraint_generators_to_network(
-        network, pypsa_friendly_tables["custom_constraints_generators"]
-    )
+    if "links" in pypsa_friendly_tables.keys():
+        _add_links_to_network(network, pypsa_friendly_tables["links"])
 
     _add_generators_to_network(
         network,
@@ -75,13 +69,21 @@ def build_pypsa_network(
         path_to_pypsa_friendly_timeseries_data,
     )
 
-    # The underlying linopy model needs to get built so we can add custom constraints.
-    network.optimize.create_model()
+    if "custom_constraints_generators" in pypsa_friendly_tables.keys():
+        _add_bus_for_custom_constraints(network)
 
-    _add_custom_constraints(
-        network,
-        pypsa_friendly_tables["custom_constraints_rhs"],
-        pypsa_friendly_tables["custom_constraints_lhs"],
-    )
+        _add_custom_constraint_generators_to_network(
+            network, pypsa_friendly_tables["custom_constraints_generators"]
+        )
+
+    # The underlying linopy model needs to get built so we can add custom constraints.
+    network.optimize.create_model(multi_investment_periods=True)
+
+    if "custom_constraints_rhs" in pypsa_friendly_tables:
+        _add_custom_constraints(
+            network,
+            pypsa_friendly_tables["custom_constraints_rhs"],
+            pypsa_friendly_tables["custom_constraints_lhs"],
+        )
 
     return network
diff --git a/src/ispypsa/model/custom_constraints.py b/src/ispypsa/model/custom_constraints.py
index 0ec7e84..135ffea 100644
--- a/src/ispypsa/model/custom_constraints.py
+++ b/src/ispypsa/model/custom_constraints.py
@@ -17,9 +17,9 @@ def _get_variables(
         component_name: str, the name given to the component when added by ISPyPSA
             to the `pypsa.Network`.
         component_type: str, the type of variable, should be one of
-            'Generator', 'Line', 'Load', or 'Storage'
+            'Generator', 'Link', 'Load', or 'Storage'
         attribute_type: str, the type of variable, should be one of
-            'p', 'p_nom', or 's'
+            'p' or 'p_nom'
 
     Returns:
         linopy.variables.Variable
@@ -27,8 +27,10 @@ def _get_variables(
     var = None
     if component_type == "Generator" and attribute_type == "p_nom":
         var = model.variables.Generator_p_nom.at[f"{component_name}"]
-    elif component_type == "Line" and attribute_type == "s":
-        var = model.variables.Line_s.loc[:, f"{component_name}"]
+    elif component_type == "Link" and attribute_type == "p":
+        var = model.variables.Link_p.loc[:, f"{component_name}"]
+    elif component_type == "Link" and attribute_type == "p_nom":
+        var = model.variables.Link_p_nom.at[f"{component_name}"]
     elif component_type == "Generator" and attribute_type == "p":
         var = model.variables.Generator_p.loc[:, f"{component_name}"]
    elif component_type == "Load" and attribute_type == "p":
@@ -62,7 +64,7 @@ def _add_custom_constraints(
         custom_constraints_lhs: `pd.DataFrame` specifying custom constraint LHS
             values. The DataFrame has five columns 'constraint_name', 'variable_name',
             'component', 'attribute', and 'coefficient'. The 'component' specifies
-            whether the LHS variable belongs to a `PyPSA` 'Bus', 'Generator', 'Line',
+            whether the LHS variable belongs to a `PyPSA` 'Bus', 'Generator', 'Link',
             etc.
The 'variable_name' specifies the name of the `PyPSA` component, and the 'attribute' specifies the attribute of the component that the variable belongs to i.e. 'p_nom', 's_nom', etc. @@ -94,6 +96,19 @@ def _add_custom_constraints( x = tuple(zip(coefficients, variables)) linear_expression = network.model.linexpr(*x) - network.model.add_constraints( - linear_expression <= row["rhs"], name=constraint_name - ) + if row["constraint_type"] == "<=": + network.model.add_constraints( + linear_expression <= row["rhs"], name=constraint_name + ) + elif row["constraint_type"] == ">=": + network.model.add_constraints( + linear_expression >= row["rhs"], name=constraint_name + ) + elif row["constraint_type"] == "==": + network.model.add_constraints( + linear_expression == row["rhs"], name=constraint_name + ) + else: + raise ValueError( + f"{row['constraint_type']} is not a valid constraint type." + ) diff --git a/src/ispypsa/model/lines.py b/src/ispypsa/model/lines.py deleted file mode 100644 index 618a606..0000000 --- a/src/ispypsa/model/lines.py +++ /dev/null @@ -1,20 +0,0 @@ -from pathlib import Path - -import pandas as pd -import pypsa - - -def _add_lines_to_network(network: pypsa.Network, lines: pd.DataFrame) -> None: - """Adds the Lines defined in a pypsa-friendly input table called `"lines"` to the - `pypsa.Network` object. - - Args: - network: The `pypsa.Network` object - lines: `pd.DataFrame` with `PyPSA` style `Line` attributes. - - Returns: None - """ - lines["class_name"] = "Line" - lines["x"] = 1 - lines["r"] = 1 - lines.apply(lambda row: network.add(**row.to_dict()), axis=1) diff --git a/src/ispypsa/model/links.py b/src/ispypsa/model/links.py new file mode 100644 index 0000000..36652bd --- /dev/null +++ b/src/ispypsa/model/links.py @@ -0,0 +1,16 @@ +import pandas as pd +import pypsa + + +def _add_links_to_network(network: pypsa.Network, links: pd.DataFrame) -> None: + """Adds the Links defined in a pypsa-friendly input table called `"links"` to the + `pypsa.Network` object. + + Args: + network: The `pypsa.Network` object + links: `pd.DataFrame` with `PyPSA` style `Link` attributes. + + Returns: None + """ + links["class_name"] = "Link" + links.apply(lambda row: network.add(**row.to_dict()), axis=1) diff --git a/src/ispypsa/templater/create_template.py b/src/ispypsa/templater/create_template.py index 8956fcb..e5d22c8 100644 --- a/src/ispypsa/templater/create_template.py +++ b/src/ispypsa/templater/create_template.py @@ -10,6 +10,8 @@ ) from ispypsa.templater.flow_paths import ( _template_regional_interconnectors, + _template_rez_transmission_costs, + _template_sub_regional_flow_path_costs, _template_sub_regional_flow_paths, ) from ispypsa.templater.nodes import ( @@ -40,12 +42,8 @@ "partial_outage_forecasts", "seasonal_ratings", "closure_years", - "rez_group_constraints_expansion_costs", - "rez_group_constraints_lhs", - "rez_group_constraints_rhs", - "rez_transmission_limit_constraints_expansion_costs", - "rez_transmission_limit_constraints_lhs", - "rez_transmission_limit_constraints_rhs", + "custom_constraints_lhs", + "custom_constraints_rhs", ] @@ -98,12 +96,8 @@ def create_ispypsa_inputs_template( Returns: dictionary of dataframes in the `ISPyPSA` format. 
(add link to ispypsa table docs) """ - template = {} - transmission_expansion_costs = manually_extracted_tables.pop( - "transmission_expansion_costs" - ) template.update(manually_extracted_tables) if regional_granularity == "sub_regions": @@ -112,7 +106,12 @@ def create_ispypsa_inputs_template( ) template["flow_paths"] = _template_sub_regional_flow_paths( - iasr_tables["flow_path_transfer_capability"], transmission_expansion_costs + iasr_tables["flow_path_transfer_capability"] + ) + + template["flow_path_expansion_costs"] = _template_sub_regional_flow_path_costs( + iasr_tables, + scenario, ) elif regional_granularity == "nem_regions": @@ -137,6 +136,19 @@ def create_ispypsa_inputs_template( iasr_tables["initial_build_limits"] ) + possible_rez_or_constraint_names = list( + set( + list(template["renewable_energy_zones"]["rez_id"]) + + list(template["custom_constraints_rhs"]["constraint_id"]) + ) + ) + + template["rez_transmission_expansion_costs"] = _template_rez_transmission_costs( + iasr_tables, + scenario, + possible_rez_or_constraint_names, + ) + template["ecaa_generators"] = _template_ecaa_generators_static_properties( iasr_tables ) diff --git a/src/ispypsa/templater/flow_paths.py b/src/ispypsa/templater/flow_paths.py index 477c14c..2eaef6f 100644 --- a/src/ispypsa/templater/flow_paths.py +++ b/src/ispypsa/templater/flow_paths.py @@ -1,17 +1,22 @@ import logging import re -from pathlib import Path import pandas as pd from .helpers import ( + _fuzzy_match_names, _snakecase_string, + _strip_all_text_after_numeric_value, +) +from .mappings import ( + _FLOW_PATH_CONFIG, + _HVDC_FLOW_PATHS, + _REZ_CONFIG, ) -from .mappings import _HVDC_FLOW_PATHS def _template_sub_regional_flow_paths( - flow_path_capabilities: pd.DataFrame, transmission_expansion_costs: pd.DataFrame + flow_path_capabilities: pd.DataFrame, ) -> pd.DataFrame: """Processes the 'Flow path transfer capability' table into an ISPyPSA template format. @@ -19,8 +24,6 @@ def _template_sub_regional_flow_paths( Args: flow_path_capabilities: pd.DataFrame IASR table specifying the flow path transfer capabilities between subregions - transmission_expansion_costs: pd.DataFrame specifying the transmission - expansion costs for each flow path. Returns: `pd.DataFrame`: ISPyPSA sub-regional flow path template @@ -30,24 +33,19 @@ def _template_sub_regional_flow_paths( ) capability_columns = _clean_capability_column_names(flow_path_capabilities) sub_regional_capabilities = pd.concat([from_to_carrier, capability_columns], axis=1) - # Only keep forward_direction_mw_summer_typical limit col as that all that's - # being used for now. cols = [ - "flow_path_name", + "flow_path", "node_from", "node_to", "carrier", "forward_direction_mw_summer_typical", + "reverse_direction_mw_summer_typical", ] sub_regional_capabilities = sub_regional_capabilities.loc[:, cols] - - sub_regional_capabilities = pd.merge( - sub_regional_capabilities, - transmission_expansion_costs, - how="left", - on="flow_path_name", - ) - + # Combine flow paths which connect the same two regions. 
+ sub_regional_capabilities = sub_regional_capabilities.groupby( + ["flow_path", "node_from", "node_to", "carrier"], as_index=False + ).sum() return sub_regional_capabilities @@ -69,10 +67,20 @@ def _template_regional_interconnectors( ) capability_columns = _clean_capability_column_names(interconnector_capabilities) regional_capabilities = pd.concat([from_to_carrier, capability_columns], axis=1) + regional_capabilities["forward_direction_mw_summer_typical"] = ( + _strip_all_text_after_numeric_value( + regional_capabilities["forward_direction_mw_summer_typical"] + ) + ) + regional_capabilities["forward_direction_mw_summer_typical"] = pd.to_numeric( + regional_capabilities["forward_direction_mw_summer_typical"].str.replace( + ",", "" + ) + ) # Only keep forward_direction_mw_summer_typical limit col as that all that's # being used for now. cols = [ - "flow_path_name", + "flow_path", "node_from", "node_to", "carrier", @@ -104,29 +112,10 @@ def _get_flow_path_name_from_to_carrier( # capture optional descriptor (e.g. '("Heywood")') + r"\s*(?P.*)" ) - from_to_desc["carrier"] = from_to_desc.apply( - lambda row: "DC" - if any( - [ - dc_line in row["descriptor"] - for dc_line in _HVDC_FLOW_PATHS["flow_path_name"] - ] - ) - # manually detect Basslink since the name is not in the descriptor - or (row["node_from"] == "TAS" and row["node_to"] == "VIC") - else "AC", - axis=1, - ) - from_to_desc["flow_path_name"] = from_to_desc.apply( - lambda row: _determine_flow_path_name( - row.node_from, - row.node_to, - row.descriptor, - row.carrier, - regional_granularity, - ), - axis=1, + from_to_desc["flow_path"] = ( + from_to_desc["node_from"] + "-" + from_to_desc["node_to"] ) + from_to_desc["carrier"] = "AC" return from_to_desc.drop(columns=["descriptor"]) @@ -150,7 +139,7 @@ def _determine_flow_path_name( name = _HVDC_FLOW_PATHS.loc[ (_HVDC_FLOW_PATHS.node_from == node_from) & (_HVDC_FLOW_PATHS.node_to == node_to), - "flow_path_name", + "flow_path", ].iat[0] elif descriptor and ( match := re.search( @@ -184,3 +173,697 @@ def _clean_capability_column_names(capability_df: pd.DataFrame) -> pd.DataFrame: col_name = _snakecase_string(direction + " (MW) " + qualifier) capability_columns.append(capability_df[col].rename(col_name)) return pd.concat(capability_columns, axis=1) + + +def _template_sub_regional_flow_path_costs( + iasr_tables: dict[str, pd.DataFrame], + scenario: str, +) -> pd.DataFrame: + """ + Process flow path augmentation options and cost forecasts to find the least cost + options for each flow path, return results in `ISPyPSA` format. + + Args: + iasr_tables: dict[str, pd.DataFrame] specifying IASR tables. Relevant DataFrames + include: + - Augmentation tables: columns include 'flow_path', 'option_name', + 'transfer_increase_forward_direction_mw', and + 'transfer_increase_reverse_direction_mw' + - Cost tables: columns include 'flow_path', 'option_name', and + financial year columns + - Preparatory activities: columns include 'flow_path', and financial + year columns + - Actionable projects: columns include 'flow_path', and financial year + columns + scenario: str specifying the scenario name (e.g., "Step Change", + "Progressive Change"). + + Returns: + pd.DataFrame containing the least cost option for each flow path. 
Columns: + - flow_path + - option_name + - nominal_flow_limit_increase_mw + - _$/mw (one column per year, e.g., '2024_25_$/mw') + """ + return process_transmission_costs( + iasr_tables=iasr_tables, + scenario=scenario, + config=_FLOW_PATH_CONFIG, + ) + + +def _template_rez_transmission_costs( + iasr_tables: dict[str, pd.DataFrame], + scenario: str, + possible_rez_or_constraint_names, +) -> pd.DataFrame: + """ + Process REZ augmentation options and cost forecasts to find least cost options for + each REZ, return results in `ISPyPSA` format. + + Args: + iasr_tables: dict[str, pd.DataFrame] specifying IASR tables. Relevant DataFrames + include: + - Augmentation tables: columns include 'rez_constraint_id', 'option', + and 'additional_network_capacity_mw' + - Cost tables: columns include 'rez_constraint_id', 'option', and + columns for each financial year (e.g., '2024-25', '2025-26', ...) + scenario: str specifying the scenario name (e.g., "Step Change", + "Progressive Change"). + possible_rez_or_constraint_names: list of possible names that cost data should + map to. The cost data is known to contain typos so the names in the cost + data are fuzzy match to the names provided in this input variable. + + Returns: + pd.DataFrame containing the least cost option for each REZ. Columns: + - rez_constraint_id + - option + - additional_network_capacity_mw + - _$/mw (cost per MW for each year, e.g., '2024_25_$/mw') + """ + rez_costs = process_transmission_costs( + iasr_tables=iasr_tables, + scenario=scenario, + config=_REZ_CONFIG, + ) + + rez_costs["rez_constraint_id"] = _fuzzy_match_names( + rez_costs["rez_constraint_id"], + possible_rez_or_constraint_names, + task_desc="Processing rez transmission costs", + ) + + return rez_costs + + +def process_transmission_costs( + iasr_tables: dict[str, pd.DataFrame], + scenario: str, + config: dict, +) -> pd.DataFrame: + """ + Generic function to process transmission costs (flow path or REZ). + + Args: + iasr_tables: dict[str, pd.DataFrame] specifying IASR tables + scenario: str specifying the scenario name + config: dict with processing configuration containing: + - transmission_type: str, either "flow_path" or "rez" + - in_coming_column_mappings: dict mapping standard column names to + rez or flow path specific names + - table_names: dict with augmentation and cost table lists + - mappings: dict with mappings for preparatory activities and other data + + Returns: + pd.DataFrame containing the least cost options with standardized column + structure + """ + cost_scenario = _determine_cost_scenario(scenario) + + # Get and process augmentation table + aug_table = _get_augmentation_table(iasr_tables=iasr_tables, config=config) + + # Get and process cost table + cost_table = _get_cost_table( + iasr_tables=iasr_tables, cost_scenario=cost_scenario, config=config + ) + + # Find the least cost options + final_costs = _get_least_cost_options( + aug_table=aug_table, cost_table=cost_table, config=config + ) + + return final_costs + + +def _determine_cost_scenario(scenario: str) -> str: + """ + Map ISP scenario to flow path/rez cost scenario. + + Args: + scenario: str specifying the scenario name. Must be one of "Step Change", + "Green Energy Exports", or "Progressive Change". + + Returns: + str specifying the internal scenario key (e.g., + "step_change_and_green_energy_exports" or "progressive_change"). 
+ """ + if scenario in ["Step Change", "Green Energy Exports"]: + return "step_change_and_green_energy_exports" + elif scenario == "Progressive Change": + return "progressive_change" + else: + raise ValueError(f"scenario: {scenario} not recognised.") + + +def _get_augmentation_table( + iasr_tables: dict[str, pd.DataFrame], config: dict +) -> pd.DataFrame: + """ + Concatenate and clean all augmentation tables for a given transmission type. + + Args: + iasr_tables: dict[str, pd.DataFrame] specifying IASR tables. Relevant tables + must contain columns: + - id (flow_path or rez_constraint_id) + - option (option_name or option) + - capacity (nominal_flow_limit_increase_mw or + additional_network_capacity_mw) + config: dict with processing configuration containing: + - in_coming_column_mappings: dict mapping standard column names to type-specific names + - table_names: dict with augmentation table lists + + Returns: + pd.DataFrame containing the concatenated augmentation table. Columns: + - id (flow_path or rez_constraint_id) + - option (option_name or option) + - capacity (nominal_flow_limit_increase_mw or additional_network_capacity_mw) + """ + table_names = config["table_names"]["augmentation"] + missing = [t for t in table_names if t not in iasr_tables] + if missing: + logging.warning(f"Missing augmentation tables: {missing}") + aug_tables = [ + iasr_tables[table_name] + for table_name in table_names + if table_name in iasr_tables + ] + if not aug_tables: + raise ValueError( + f"No {config['transmission_type']} augmentation tables found in iasr_tables." + ) + aug_table = pd.concat(aug_tables, ignore_index=True) + aug_table = _clean_augmentation_table_column_names(aug_table, config) + aug_table = _clean_augmentation_table_column_values(aug_table, config) + return aug_table + + +def _get_cost_table( + iasr_tables: dict[str, pd.DataFrame], cost_scenario: str, config: dict +) -> pd.DataFrame: + """ + Combine all cost tables, preparatory activities, and actionable projects for a given + scenario into a single DataFrame. + + Args: + iasr_tables: dict[str, pd.DataFrame] specifying IASR tables. Cost tables must + have columns: + - id (flow_path or rez_constraint_id) + - option (Option Name or Option) + - (e.g., '2024-25', ...) + flow_path_scenario: str specifying the cost scenario name. + config: dict with processing configuration containing: + - transmission_type: str, either "flow_path" or "rez" + - column_mappings: dict mapping standard column names to rez/flow path names + - table_names: dict with cost table lists + - mappings: dict with option name mappings for preparatory activities and + actionable isp data + + Returns: + pd.DataFrame containing the combined cost table. Columns: + - id (flow_path or rez_constraint_id) + - option (option_name or option) + - (e.g., '2024_25', ...) + """ + cost_table_names = config["table_names"]["cost"][cost_scenario] + cost_table = _get_cleaned_cost_tables(iasr_tables, cost_table_names, config) + prep_activities = _get_prep_activities_table(iasr_tables, cost_scenario, config) + actionable_projects = _get_actionable_projects_table( + iasr_tables, cost_scenario, config + ) + return _combine_cost_tables(cost_table, prep_activities, actionable_projects) + + +def _get_least_cost_options( + aug_table: pd.DataFrame, cost_table: pd.DataFrame, config: dict +) -> pd.DataFrame: + """ + For each transmission, select the augmentation option with the lowest cost per MW of + increased capacity, using the first year with complete costs for all options. 
The + selected option and its costs per MW are used for all years. + + Args: + aug_table: pd.DataFrame containing columns: + - id (flow_path or rez_constraint_id) + - option (option_name or option) + - capacity (nominal_flow_limit_increase_mw or additional_network_capacity_mw) + cost_table: pd.DataFrame containing columns: + - id (flow_path or rez_constraint_id) + - option (option_name or option) + - (e.g., '2024_25', ...) + config: dict with processing configuration containing: + - transmission_type: str, either "flow_path" or "rez" + - in_coming_column_mappings: dict mapping standard column names to + type-specific names + + Returns: + pd.DataFrame containing columns: + - id (flow_path or rez_constraint_id) + - option (option_name or option) + - additional_network_capacity_mw + - _$/mw (cost per MW for each year, e.g., '2024_25_$/mw') + """ + year_cols = _get_year_columns(cost_table) + valid_costs_df = _find_first_year_with_complete_costs(cost_table, year_cols) + valid_costs_df["option"] = _fuzzy_match_names( + valid_costs_df["option"], + aug_table["option"], + "matching transmission augmentation options and costs", + not_match="existing", + threshold=80, + ) + transmission_analysis = pd.merge( + aug_table, valid_costs_df, on=["id", "option"], how="inner" + ) + _log_unmatched_transmission_options( + aug_table, valid_costs_df, transmission_analysis + ) + transmission_analysis["cost_per_mw"] = ( + transmission_analysis["cost"] + / transmission_analysis["nominal_capacity_increase"] + ) + least_cost_options = transmission_analysis.loc[ + transmission_analysis.groupby("id")["cost_per_mw"].idxmin() + ] + final_costs = pd.merge( + cost_table, + least_cost_options[["id", "option", "nominal_capacity_increase"]], + on=["id", "option"], + how="inner", + ) + # Divide each financial year column by capacity and rename with _$/mw suffix + for year_col in year_cols: + new_col = f"{year_col}_$/mw" + final_costs[new_col] = ( + final_costs[year_col] / final_costs["nominal_capacity_increase"] + ) + final_costs.drop(columns=year_col, inplace=True) + final_costs = final_costs.rename(columns=config["out_going_column_mappings"]) + return final_costs + + +def _clean_augmentation_table_column_names( + aug_table: pd.DataFrame, config: dict +) -> pd.DataFrame: + """ + Clean and rename columns in the augmentation table. + + Args: + aug_table: pd.DataFrame specifying the augmentation table. + config: dict with processing configuration containing: + - in_coming_column_mappings: dict mapping standard column names to type-specific names + + Returns: + pd.DataFrame containing the cleaned and renamed augmentation table. + """ + # Map specific columns to standardized names + # Reverse the in_coming_column_mappings dict to go from specific -> generic + aug_table = aug_table.rename(columns=config["in_coming_column_mappings"]) + cols_to_keep = list( + set( + [ + col + for col in config["in_coming_column_mappings"].values() + if col in aug_table.columns + ] + ) + ) + return aug_table.loc[:, cols_to_keep] + + +def _clean_augmentation_table_column_values( + aug_table: pd.DataFrame, config: dict +) -> pd.DataFrame: + """ + Prepare and typecast augmentation table columns for analysis. 
+ + Args: + aug_table: pd.DataFrame containing transmission-specific columns + config: dict with processing configuration containing: + - transmission_type: str specifying the type of transmission + - in_coming_column_mappings: dict mapping standard column names to + flow path/rez names + + Returns: + pd.DataFrame containing standardized columns: + - id + - option + - nominal_capacity_increase + """ + transmission_type = config["transmission_type"] + + # Handle flow path special case: calculate capacity as max of forward and reverse + if transmission_type == "flow_path": + aug_table["forward_capacity_increase"] = pd.to_numeric( + _strip_all_text_after_numeric_value(aug_table["forward_capacity_increase"]), + errors="coerce", + ) + aug_table["reverse_capacity_increase"] = pd.to_numeric( + _strip_all_text_after_numeric_value(aug_table["reverse_capacity_increase"]), + errors="coerce", + ) + aug_table["nominal_capacity_increase"] = aug_table[ + ["forward_capacity_increase", "reverse_capacity_increase"] + ].max(axis=1) + else: + aug_table["nominal_capacity_increase"] = pd.to_numeric( + _strip_all_text_after_numeric_value(aug_table["nominal_capacity_increase"]), + errors="coerce", + ) + return aug_table + + +def _get_cleaned_cost_tables( + iasr_tables: dict[str, pd.DataFrame], cost_table_names: list, config: dict +) -> pd.DataFrame: + """ + Retrieve, clean, concatenate, and filter all cost tables for a scenario and + transmission type. + + Args: + iasr_tables: dict[str, pd.DataFrame] specifying IASR tables. + cost_table_names: list of str specifying the names of cost tables to extract + and clean. + config: dict with processing configuration containing: + - in_coming_column_mappings: dict mapping standard column names to + flow path / rez names + + Returns: + pd.DataFrame containing the concatenated and filtered cost tables. Columns: + - id + - option + - (e.g., '2024_25', ...) + """ + missing = [t for t in cost_table_names if t not in iasr_tables] + if missing: + logging.warning(f"Missing cost tables: {missing}") + cost_tables = [] + for table_name in cost_table_names: + if table_name not in iasr_tables: + continue + table = iasr_tables[table_name].copy() + table = table.rename(columns=config["in_coming_column_mappings"]) + cost_tables.append(table) + if not cost_tables: + raise ValueError("No cost tables found in iasr_tables.") + cost_table = pd.concat(cost_tables, ignore_index=True) + cost_table.columns = [_snakecase_string(col) for col in cost_table.columns] + forecast_year_cols = [ + col for col in cost_table.columns if re.match(r"^\d{4}_\d{2}$", col) + ] + if not forecast_year_cols: + raise ValueError("No financial year columns found in cost table") + cost_table[forecast_year_cols[0]] = pd.to_numeric( + cost_table[forecast_year_cols[0]], errors="coerce" + ) + cost_table = cost_table.dropna(subset=forecast_year_cols, how="all") + return cost_table + + +def _get_prep_activities_table( + iasr_tables: dict[str, pd.DataFrame], cost_scenario: str, config: dict +) -> pd.DataFrame: + """ + Process the preparatory activities table for a given transmission type. + + Args: + iasr_tables: dict[str, pd.DataFrame] specifying IASR tables. + cost_scenario: str specifying the internal scenario key. + config: dict with processing configuration containing: + - mappings: dict with mappings for preparatory activities and other data + + Returns: + pd.DataFrame containing the aggregated preparatory activities. Columns: + - id + - option + - (e.g., '2024_25', '2025_26', ...) 
+ """ + transmission_type = config["transmission_type"] + if transmission_type == "flow_path": + prep_activities_table_name = ( + f"flow_path_augmentation_costs_{cost_scenario}_preparatory_activities" + ) + else: + prep_activities_table_name = ( + f"rez_augmentation_costs_{cost_scenario}_preparatory_activities" + ) + + if prep_activities_table_name not in iasr_tables: + logging.warning( + f"Missing preparatory activities table: {prep_activities_table_name}" + ) + # Return empty DataFrame with expected columns + return pd.DataFrame(columns=["id", "option"]) + + prep_activities = iasr_tables[prep_activities_table_name].copy() + prep_activities = prep_activities.rename( + columns=config["in_coming_column_mappings"] + ) + prep_activities.columns = [ + _snakecase_string(col) for col in prep_activities.columns + ] + prep_activities = prep_activities.drop( + columns=[col for col in prep_activities.columns if "unnamed" in col] + ) + + if transmission_type == "flow_path": + # Flow path preparatory activities processing + # Validate 'flow_path' values + invalid_flow_paths = set(prep_activities["id"]) - set( + config["mappings"]["prep_activities_name_to_option"].keys() + ) + if invalid_flow_paths: + raise ValueError( + f"Missing mapping values for the flow paths provided: {sorted(invalid_flow_paths)}. " + f"Please ensure these are present in templater/mappings.py." + ) + prep_activities["option"] = prep_activities["id"].map( + config["mappings"]["prep_activities_name_to_option"] + ) + + # Validate 'option_name' values + invalid_option_names = set(prep_activities["option"]) - set( + config["mappings"]["option_to_id"].keys() + ) + if invalid_option_names: + raise ValueError( + f"Missing mapping values for the option names provided: {sorted(invalid_option_names)}. " + f"Please ensure these are present in templater/mappings.py." + ) + prep_activities = prep_activities.groupby("option").sum().reset_index() + prep_activities["id"] = prep_activities["option"].map( + config["mappings"]["option_to_id"] + ) + + elif transmission_type == "rez": + # Validate REZ names/IDs + invalid_rez_names = set(prep_activities["rez"]) - set( + config["prep_activities_mapping"].keys() + ) + if invalid_rez_names: + raise ValueError( + f"Missing mapping values for the REZ names provided: {sorted(invalid_rez_names)}. " + f"Please ensure these are present in templater/mappings.py." + ) + + prep_activities["option"] = prep_activities["rez"].apply( + lambda x: config["prep_activities_mapping"][x][1] + ) + prep_activities["id"] = prep_activities["rez"].apply( + lambda x: config["prep_activities_mapping"][x][0] + ) + return _sort_cols(prep_activities, ["id", "option"]) + + +def _get_actionable_projects_table( + iasr_tables: dict[str, pd.DataFrame], cost_scenario: str, config: dict +) -> pd.DataFrame: + """ + Process the actionable ISP projects table for flow paths. + + Args: + iasr_tables: dict[str, pd.DataFrame] specifying IASR tables. Table must have + columns: + - id (flow_path) + - (e.g., '2024-25', ...) + cost_scenario: str specifying the internal scenario key. + config: dict with processing configuration containing: + - mappings: dict with mappings for actionable projects + + Returns: + pd.DataFrame containing the actionable projects table. Columns: + - id (flow_path) + - option (option_name) + - (e.g., '2024_25', '2025_26', ...) 
+ """ + transmission_type = config["transmission_type"] + + # REZ has no actionable projects, return empty DataFrame + if transmission_type == "rez": + return pd.DataFrame(columns=["id", "option"]) + + # Process flow path actionable projects + actionable_projects_table_name = ( + f"flow_path_augmentation_costs_{cost_scenario}_actionable_isp_projects" + ) + + if actionable_projects_table_name not in iasr_tables: + logging.warning( + f"Missing actionable ISP projects table: {actionable_projects_table_name}" + ) + # Return empty DataFrame with expected columns + return pd.DataFrame(columns=["id", "option"]) + + actionable_projects = iasr_tables[actionable_projects_table_name].copy() + actionable_projects = actionable_projects.rename( + columns=config["in_coming_column_mappings"] + ) + actionable_projects.columns = [ + _snakecase_string(col) for col in actionable_projects.columns + ] + actionable_projects = actionable_projects.drop( + columns=[col for col in actionable_projects.columns if "unnamed" in col] + ) + + # Validate 'flow_path' values + invalid_flow_paths = set(actionable_projects["id"]) - set( + config["mappings"]["actionable_name_to_option"].keys() + ) + if invalid_flow_paths: + raise ValueError( + f"Missing mapping values for the flow paths provided: {sorted(invalid_flow_paths)}. " + f"Please ensure these are present in {config['mappings']['actionable_name_to_option']}." + ) + actionable_projects["option"] = actionable_projects["id"].map( + config["mappings"]["actionable_name_to_option"] + ) + + # Validate 'option_name' values + invalid_option_names = set(actionable_projects["option"]) - set( + config["mappings"]["actionable_option_to_id"].keys() + ) + if invalid_option_names: + raise ValueError( + f"Missing mapping values for the option names provided: {sorted(invalid_option_names)}. " + f"Please ensure these are present in {config['mappings']['actionable_option_to_id']}." + ) + actionable_projects["id"] = actionable_projects["option"].map( + config["mappings"]["actionable_option_to_id"] + ) + + return _sort_cols(actionable_projects, ["id", "option"]) + + +def _combine_cost_tables( + cost_table: pd.DataFrame, + prep_activities: pd.DataFrame, + actionable_projects: pd.DataFrame, +) -> pd.DataFrame: + """ + Combine the cost table, preparatory activities table, and actionable projects table + into a single DataFrame. + + Args: + cost_table: pd.DataFrame specifying the cost table. + prep_activities: pd.DataFrame specifying the preparatory activities table. + actionable_projects: pd.DataFrame specifying the actionable projects table. + + Returns: + pd.DataFrame containing the combined cost table. + """ + tables = [cost_table, prep_activities] + + # Only include actionable_projects if it's not empty + if not actionable_projects.empty: + tables.append(actionable_projects) + + return pd.concat(tables, ignore_index=True) + + +def _get_year_columns(cost_table: pd.DataFrame) -> list: + """ + Get the financial year columns from the cost table. + + Args: + cost_table: pd.DataFrame specifying the cost table. + + Returns: + list of str specifying the financial year columns. + """ + year_cols = [col for col in cost_table.columns if re.match(r"\d{4}_\d{2}", col)] + return year_cols + + +def _find_first_year_with_complete_costs( + cost_table: pd.DataFrame, year_cols: list +) -> pd.DataFrame: + """ + Find the first year with complete costs for each transmission. 
+
+    Args:
+        cost_table: pd.DataFrame specifying the cost table with columns:
+            - id (flow_path or rez_constraint_id)
+            - option (option_name or option)
+            - financial year columns (e.g., '2024_25', ...)
+        year_cols: list of str specifying the financial year column names.
+
+    Returns:
+        pd.DataFrame containing columns:
+            - id (flow_path or rez_constraint_id)
+            - option (option_name or option)
+            - cost
+            - first_valid_year_col
+    """
+    valid_cost_rows = []
+    missing_full_year_transmissions = []
+    for transmission, group in cost_table.groupby("id"):
+        found = False
+        # Iterate through years (sort years based on first int in year string)
+        for year in sorted(year_cols, key=lambda y: int(y.split("_")[0])):
+            costs = pd.to_numeric(group[year], errors="coerce")
+            if not costs.isna().any():
+                for idx, row in group.iterrows():
+                    entry = row[["id", "option"]].to_dict()
+                    entry["cost"] = costs.loc[idx]
+                    entry["first_valid_year_col"] = year
+                    valid_cost_rows.append(entry)
+                found = True
+                break
+        if not found:
+            missing_full_year_transmissions.append(transmission)
+    if missing_full_year_transmissions:
+        raise ValueError(
+            f"No year found with all non-NA costs for transmissions: {missing_full_year_transmissions}"
+        )
+    return pd.DataFrame(valid_cost_rows)
+
+
+def _log_unmatched_transmission_options(
+    aug_table: pd.DataFrame, valid_costs_df: pd.DataFrame, merged_df: pd.DataFrame
+):
+    """
+    Logs (id, option) pairs that were dropped from each side during the merge.
+    """
+    left_keys = set(tuple(x) for x in aug_table[["id", "option"]].values)
+    right_keys = set(tuple(x) for x in valid_costs_df[["id", "option"]].values)
+    merged_keys = set(tuple(x) for x in merged_df[["id", "option"]].values)
+
+    dropped_from_left = left_keys - merged_keys
+    dropped_from_right = right_keys - merged_keys
+
+    if dropped_from_left:
+        logging.info(
+            f"Dropped options from augmentation table: {sorted(dropped_from_left)}"
+        )
+    if dropped_from_right:
+        logging.info(f"Dropped options from cost table: {sorted(dropped_from_right)}")
+
+
+def _sort_cols(table: pd.DataFrame, start_cols: list[str]) -> pd.DataFrame:
+    """
+    Reorder a pd.DataFrame's columns using the fixed order provided in start_cols and
+    then sorting the remaining columns alphabetically.
+    """
+    remaining_cols = list(set(table.columns) - set(start_cols))
+    sorted_remaining_columns = sorted(remaining_cols)
+    return table.loc[:, start_cols + sorted_remaining_columns]
diff --git a/src/ispypsa/templater/helpers.py b/src/ispypsa/templater/helpers.py
index a622bf0..4a7eeb2 100644
--- a/src/ispypsa/templater/helpers.py
+++ b/src/ispypsa/templater/helpers.py
@@ -227,3 +227,18 @@ def _convert_financial_year_columns_to_float(df: pd.DataFrame) -> pd.DataFrame:
         for col in df.columns
     ]
     return pd.concat(cols, axis=1)
+
+
+def _strip_all_text_after_numeric_value(
+    series: pd.Index | pd.Series,
+) -> pd.Index | pd.Series:
+    """
+    Removes all text after the first numeric value.
+
+    Numeric value can contain commas and one period.
+    """
+    if series.dtypes == "object":
+        series = series.astype(str).str.replace(
+            r"^([0-9\.\,+]+)\s+.*", r"\1", regex=True
+        )
+    return series
diff --git a/src/ispypsa/templater/manually_extracted_template_tables/6.0/custom_constraints_lhs.csv b/src/ispypsa/templater/manually_extracted_template_tables/6.0/custom_constraints_lhs.csv
new file mode 100644
index 0000000..8ed01cb
--- /dev/null
+++ b/src/ispypsa/templater/manually_extracted_template_tables/6.0/custom_constraints_lhs.csv
@@ -0,0 +1,61 @@
+constraint_id,term_type,term_id,coefficient
+NQ2,link_flow,CQ-NQ,-1
+NQ2,link_flow,Q4-CQ,1
+NQ2,link_flow,Q5-CQ,1
+SQ1,link_flow,SQ-CQ,-0.5
+SQ1,link_flow,Q7-SQ,1
+SWV1,link_flow,VIC-SESA,-1
+SWV1,link_flow,V4-VIC,1
+SWV1,link_flow,V8-VIC,1
+MN1,link_flow,S3-CSA,1
+MN1,link_flow,S4-CSA,0.5
+MN1,link_flow,S5-CSA,1
+MN1,link_flow,S6-CSA,1
+MN1,link_flow,S7-CSA,1
+MN1,link_flow,S8-CSA,1
+MN1,link_flow,S9-CSA,1
+MN1,load_consumption,CSA Export Electrolyser,-1
+MN1 North,link_flow,S3-CSA,1
+MN1 North,link_flow,S4-CSA,-0.5
+MN1 North,link_flow,S5-CSA,-1
+MN1 North,link_flow,S6-CSA,-1
+MN1 North,link_flow,S7-CSA,-1
+MN1 North,link_flow,S8-CSA,-1
+MN1 North,link_flow,S9-CSA,-1
+MN1 North,load_consumption,CSA Export Electrolyser,1
+NSA1,link_flow,S5-CSA,0.5
+NSA1,link_flow,S8-CSA,1
+NSA1,link_flow,S9-CSA,1
+NSA1,load_consumption,CSA Export Electrolyser,-1
+NSA1 North,link_flow,S5-CSA,-0.05
+NSA1 North,link_flow,S8-CSA,-1
+NSA1 North,link_flow,S9-CSA,-1
+NSA1 North,load_consumption,CSA Export Electrolyser,1
+NET1,link_flow,T1-TAS,1
+NET1,link_flow,T4-TAS,1
+SEVIC1,link_flow,V5-VIC,1
+SEVIC1,link_flow,V7-VIC,1
+SEVIC1,link_flow,TAS-VIC,1
+SEVIC1,generator_output,Loy Yang A Power Station,1
+SEVIC1,generator_output,Loy Yang B,1
+SEVIC1,generator_output,Yallourn W,1
+SEVIC1,generator_output,Jeeralang A,1
+SEVIC1,generator_output,Jeeralang B,1
+SEVIC1,generator_output,Bairnsdale,1
+SEVIC1,generator_output,Valley Power,1
+SWQLD1,link_flow,Q8-SQ,1
+SWQLD1,link_flow,NNSW-SQ,1.5
+SWQLD1,link_flow,SQ-CQ,-0.3
+SWQLD1,generator_output,Tarong,0.6
+SWQLD1,generator_output,Tarong North,0.6
+SWQLD1,generator_output,Kogan Creek,0.6
+SWQLD1,generator_output,Darling Downs,0.6
+SWQLD1,generator_output,Braemar,0.6
+SWQLD1,generator_output,Braemar 2 Power Station,0.6
+SWQLD1,generator_output,Millmerran,1.5
+SWQLD1,generator_output,Borumba,0.5
+S1-TBMO,link_flow,SESA-CSA,0.3
+S1-TBMO,link_flow,S1-SESA,1
+S1-TBMO,generator_output,Tailem Bend Solar Farm,1
+S1-TBMO,generator_output,Tailem Bend Solar Farm - stage 2,1
+S1-TBMO,storage_output,Tailem Bend Battery Project,1
diff --git a/src/ispypsa/templater/manually_extracted_template_tables/6.0/custom_constraints_rhs.csv b/src/ispypsa/templater/manually_extracted_template_tables/6.0/custom_constraints_rhs.csv
new file mode 100644
index 0000000..396c487
--- /dev/null
+++ b/src/ispypsa/templater/manually_extracted_template_tables/6.0/custom_constraints_rhs.csv
@@ -0,0 +1,12 @@
+constraint_id,constraint_type,rhs
+NQ2,<=,2500
+SQ1,<=,1400
+SWV1,<=,1850
+MN1,<=,2000
+MN1 North,<=,800
+NSA1,<=,1125
+NSA1 North,<=,350
+NET1,<=,1600
+SEVIC1,<=,6000
+SWQLD1,<=,5300
+S1-TBMO,<=,350
diff --git a/src/ispypsa/templater/manually_extracted_template_tables/6.0/rez_group_constraints_expansion_costs.csv b/src/ispypsa/templater/manually_extracted_template_tables/6.0/rez_group_constraints_expansion_costs.csv
deleted file mode 100644
index 00ae7b6..0000000
--- a/src/ispypsa/templater/manually_extracted_template_tables/6.0/rez_group_constraints_expansion_costs.csv
+++ /dev/null
@@ -1,8 +0,0 @@
-term_id,indicative_transmission_expansion_cost_$/mw -NQ2-EXPANSION,430000 -SQ1-EXPANSION,490000 -SW1-EXPANSION,630000 -MN1-EXPANSION,320000 -MN1 North-EXPANSION,1430000 -NSA1-EXPANSION,220000 -NTE1-EXPANSION,340000 diff --git a/src/ispypsa/templater/manually_extracted_template_tables/6.0/rez_group_constraints_lhs.csv b/src/ispypsa/templater/manually_extracted_template_tables/6.0/rez_group_constraints_lhs.csv deleted file mode 100644 index 602d07e..0000000 --- a/src/ispypsa/templater/manually_extracted_template_tables/6.0/rez_group_constraints_lhs.csv +++ /dev/null @@ -1,43 +0,0 @@ -constraint_id,term_type,term_id,coefficient -NQ2,line_flow,CQ-NQ,-1 -NQ2,line_flow,Q4-CQ,1 -NQ2,line_flow,Q5-CQ,1 -NQ2,generator_capacity,NQ2-EXPANSION,-1 -SQ1,line_flow,SQ-CQ,-0.5 -SQ1,line_flow,Q7-SQ,1 -SQ1,generator_capacity,SQ1-EXPANSION,-1 -SWV1,line_flow,Heywood,-1 -SWV1,line_flow,V4-VIC,1 -SWV1,line_flow,V8-VIC,1 -SWV1,generator_capacity,SW1-EXPANSION,-1 -MN1,line_flow,S3-CSA,1 -MN1,line_flow,S4-CSA,0.5 -MN1,line_flow,S5-CSA,1 -MN1,line_flow,S6-CSA,1 -MN1,line_flow,S7-CSA,1 -MN1,line_flow,S8-CSA,1 -MN1,line_flow,S9-CSA,1 -MN1,load_consumption,CSA Export Electrolyser,-1 -MN1,generator_capacity,MN1-EXPANSION,-1 -MN1 North,line_flow,S3-CSA,1 -MN1 North,line_flow,S4-CSA,-0.5 -MN1 North,line_flow,S5-CSA,-1 -MN1 North,line_flow,S6-CSA,-1 -MN1 North,line_flow,S7-CSA,-1 -MN1 North,line_flow,S8-CSA,-1 -MN1 North,line_flow,S9-CSA,-1 -MN1 North,load_consumption,CSA Export Electrolyser,1 -MN1 North,generator_capacity,MN1 North-EXPANSION,-1 -NSA1,line_flow,S5-CSA,0.5 -NSA1,line_flow,S8-CSA,1 -NSA1,line_flow,S9-CSA,1 -NSA1,load_consumption,CSA Export Electrolyser,-1 -NSA1,generator_capacity,NSA1-EXPANSION,-1 -NSA1 North,line_flow,S5-CSA,-0.05 -NSA1 North,line_flow,S8-CSA,-1 -NSA1 North,line_flow,S9-CSA,-1 -NSA1 North,load_consumption,CSA Export Electrolyser,1 -NSA1 North,generator_capacity,NSA1-EXPANSION,-1 -NET1,line_flow,T1-TAS,1 -NET1,line_flow,T4-TAS,1 -NET1,generator_capacity,NTE1-EXPANSION,-1 diff --git a/src/ispypsa/templater/manually_extracted_template_tables/6.0/rez_group_constraints_rhs.csv b/src/ispypsa/templater/manually_extracted_template_tables/6.0/rez_group_constraints_rhs.csv deleted file mode 100644 index e7ef56e..0000000 --- a/src/ispypsa/templater/manually_extracted_template_tables/6.0/rez_group_constraints_rhs.csv +++ /dev/null @@ -1,9 +0,0 @@ -constraint_id,summer_peak,summer_typical,winter_reference -NQ2,2500,2500,2750 -SQ1,1400,1400,1 -SWV1,1850,1850,1850 -MN1,2000,2000,2000 -MN1 North,800,800,800 -NSA1,1125,1125,1125 -NSA1 North,350,350,350 -NET1,1600,1600,1600 diff --git a/src/ispypsa/templater/manually_extracted_template_tables/6.0/rez_transmission_limit_constraints_expansion_costs.csv b/src/ispypsa/templater/manually_extracted_template_tables/6.0/rez_transmission_limit_constraints_expansion_costs.csv deleted file mode 100644 index b00d07e..0000000 --- a/src/ispypsa/templater/manually_extracted_template_tables/6.0/rez_transmission_limit_constraints_expansion_costs.csv +++ /dev/null @@ -1,4 +0,0 @@ -term_id,indicative_transmission_expansion_cost_$/mw -SEVIC1-EXPANSION,150000 -SWQLD1-EXPANSION,60000 -S1-TBMO-EXPANSION,280000 diff --git a/src/ispypsa/templater/manually_extracted_template_tables/6.0/rez_transmission_limit_constraints_lhs.csv b/src/ispypsa/templater/manually_extracted_template_tables/6.0/rez_transmission_limit_constraints_lhs.csv deleted file mode 100644 index 71ad405..0000000 --- 
a/src/ispypsa/templater/manually_extracted_template_tables/6.0/rez_transmission_limit_constraints_lhs.csv +++ /dev/null @@ -1,30 +0,0 @@ -constraint_id,term_type,term_id,coefficient -SEVIC1,line_flow,V5-VIC,1 -SEVIC1,line_flow,V7-VIC,1 -SEVIC1,line_flow,Basslink,1 -SEVIC1,generator_output,Loy Yang A Power Station,1 -SEVIC1,generator_output,Loy Yang B,1 -SEVIC1,generator_output,Yallourn W,1 -SEVIC1,generator_output,Jeeralang A,1 -SEVIC1,generator_output,Jeeralang B,1 -SEVIC1,generator_output,Bairnsdale,1 -SEVIC1,generator_output,Valley Power,1 -SEVIC1,generator_capacity,SEVIC1-EXPANSION,-1 -SWQLD1,line_flow,Q8-SQ,1 -SWQLD1,line_flow,NNSW-SQ,1.5 -SWQLD1,line_flow,SQ-CQ,-0.3 -SWQLD1,generator_output,Tarong,0.6 -SWQLD1,generator_output,Tarong North,0.6 -SWQLD1,generator_output,Kogan Creek,0.6 -SWQLD1,generator_output,Darling Downs,0.6 -SWQLD1,generator_output,Braemar,0.6 -SWQLD1,generator_output,Braemar 2 Power Station,0.6 -SWQLD1,generator_output,Millmerran,1.5 -SWQLD1,generator_output,Borumba,0.5 -SWQLD1,generator_capacity,SWQLD1-EXPANSION,-1 -S1-TBMO,line_flow,SESA-CSA,0.3 -S1-TBMO,line_flow,S1-SESA,1 -S1-TBMO,generator_output,Tailem Bend Solar Farm,1 -S1-TBMO,generator_output,Tailem Bend Solar Farm - stage 2,1 -S1-TBMO,storage_output,Tailem Bend Battery Project,1 -S1-TBMO,generator_capacity,S1-TBMO-EXPANSION,-1 diff --git a/src/ispypsa/templater/manually_extracted_template_tables/6.0/rez_transmission_limit_constraints_rhs.csv b/src/ispypsa/templater/manually_extracted_template_tables/6.0/rez_transmission_limit_constraints_rhs.csv deleted file mode 100644 index 66abf9f..0000000 --- a/src/ispypsa/templater/manually_extracted_template_tables/6.0/rez_transmission_limit_constraints_rhs.csv +++ /dev/null @@ -1,4 +0,0 @@ -constraint_id,summer_peak,summer_typical,winter_reference -SEVIC1,6000,6000,6000 -SWQLD1,5300,5300,5300 -S1-TBMO,350,350,350 diff --git a/src/ispypsa/templater/manually_extracted_template_tables/6.0/transmission_expansion_costs.csv b/src/ispypsa/templater/manually_extracted_template_tables/6.0/transmission_expansion_costs.csv deleted file mode 100644 index 55ddae1..0000000 --- a/src/ispypsa/templater/manually_extracted_template_tables/6.0/transmission_expansion_costs.csv +++ /dev/null @@ -1,14 +0,0 @@ -flow_path_name,indicative_transmission_expansion_cost_$/mw -CQ-NQ,1.126363636 -CQ-GG,0.838709677 -SQ-CQ,0.513333333 -NNSW-SQ,1.702027027 -Terranora,NA -CNSW-NNSW,0.497666667 -CNSW-SNW,0.284 -SNSW-CNSW,0.502333333 -VIC-SNSW,2.00554939 -Heywood,0.454333333 -SESA-CSA,0.602333333 -Murraylink,NA -Basslink,3.646666667 diff --git a/src/ispypsa/templater/mappings.py b/src/ispypsa/templater/mappings.py index 3776069..4e19513 100644 --- a/src/ispypsa/templater/mappings.py +++ b/src/ispypsa/templater/mappings.py @@ -42,7 +42,7 @@ { "node_from": ["NNSW", "VIC", "TAS"], "node_to": ["SQ", "CSA", "VIC"], - "flow_path_name": ["Terranora", "Murraylink", "Basslink"], + "flow_path": ["Terranora", "Murraylink", "Basslink"], } ) @@ -318,3 +318,180 @@ }, ], } + + +# Subregion flow paths +_SUBREGION_FLOW_PATHS = [ + "CQ-NQ", + "CQ-GG", + "SQ-CQ", + "NNSW-SQ", + "CNSW-NNSW", + "CNSW-SNW", + "SNSW-CNSW", + "VIC-SNSW", + "TAS-VIC", + "VIC-SESA", + "SESA-CSA", +] + +_FLOW_PATH_AGUMENTATION_TABLES = [ + "flow_path_augmentation_options_" + fp for fp in _SUBREGION_FLOW_PATHS +] + +_REZ_CONNECTION_AGUMENTATION_TABLES = [ + "rez_augmentation_options_" + region for region in list(_NEM_REGION_IDS) +] + +_FLOW_PATH_AUGMENTATION_COST_TABLES_PROGRESSIVE_CHANGE = [ + "flow_path_augmentation_costs_progressive_change_" 
+ fp + for fp in _SUBREGION_FLOW_PATHS +] + +_FLOW_PATH_AUGMENTATION_COST_TABLES_STEP_CHANGE_AND_GREEN_ENERGY_EXPORTS = [ + "flow_path_augmentation_costs_step_change_and_green_energy_exports_" + fp + for fp in _SUBREGION_FLOW_PATHS +] + +_FLOW_PATH_AUGMENTATION_COST_TABLES = ( + _FLOW_PATH_AUGMENTATION_COST_TABLES_PROGRESSIVE_CHANGE + + _FLOW_PATH_AUGMENTATION_COST_TABLES_STEP_CHANGE_AND_GREEN_ENERGY_EXPORTS +) + +_REZ_AUGMENTATION_COST_TABLES_PROGRESSIVE_CHANGE = [ + "rez_augmentation_costs_progressive_change_" + region + for region in list(_NEM_REGION_IDS) +] + +_REZ_AUGMENTATION_COST_TABLES_STEP_CHANGE_AND_GREEN_ENERGY_EXPORTS = [ + "rez_augmentation_costs_step_change_and_green_energy_exports_" + region + for region in list(_NEM_REGION_IDS) +] + +_REZ_AUGMENTATION_COST_TABLES = ( + _REZ_AUGMENTATION_COST_TABLES_PROGRESSIVE_CHANGE + + _REZ_AUGMENTATION_COST_TABLES_STEP_CHANGE_AND_GREEN_ENERGY_EXPORTS +) + +_FLOW_PATH_AGUMENTATION_NAME_ADJUSTMENTS = { + "Notional transfer level increase (MW) Note: Same increase applies to all transfer limit conditions (Peak demand, Summer typical and Winter reference)_Forward direction": "transfer_increase_forward_direction_MW", + "Notional transfer level increase (MW) Note: Same increase applies to all transfer limit conditions (Peak demand, Summer typical and Winter reference)_Reverse direction": "transfer_increase_reverse_direction_MW", +} + +_PREPATORY_ACTIVITIES_TABLES = [ + "flow_path_augmentation_costs_step_change_and_green_energy_exports_preparatory_activities", + "flow_path_augmentation_costs_progressive_change_preparatory_activities", +] + +_REZ_CONNECTION_PREPATORY_ACTIVITIES_TABLES = [ + "rez_augmentation_costs_step_change_and_green_energy_exports_preparatory_activities", + "rez_augmentation_costs_progressive_change_preparatory_activities", +] + +_ACTIONABLE_ISP_PROJECTS_TABLES = [ + "flow_path_augmentation_costs_step_change_and_green_energy_exports_actionable_isp_projects", + "flow_path_augmentation_costs_progressive_change_actionable_isp_projects", +] + +_PREPATORY_ACTIVITIES_NAME_TO_OPTION_NAME = { + "500kV QNI Connect (NSW works)": "NNSW–SQ Option 5", + "500kV QNI Connect (QLD works)": "NNSW–SQ Option 5", + "330kV QNI single circuit (NSW works)": "NNSW–SQ Option 1", + "330kV QNI single circuit (QLD works)": "NNSW–SQ Option 1", + "330kV QNI double circuit (NSW works)": "NNSW–SQ Option 2", + "330kV QNI double circuit (QLD works)": "NNSW–SQ Option 2", + "CQ-GG": "CQ-GG Option 1", + "Sydney Southern Ring": "CNSW-SNW Option 2", +} + +_REZ_PREPATORY_ACTIVITIES_NAME_TO_REZ_AND_OPTION_NAME = { + "Darling Downs REZ Expansion(Stage 1)": ["SWQLD1", "Option 1"], + "South East SA REZ": ["S1-TBMO", "Option 1"], + "South West Victoria REZ Option 1": ["SWV1", "Option 1"], + "South West Victoria REZ Option 1A": ["SWV1", "Option 1A"], + "South West Victoria REZ Option 1B": ["SWV1", "Option 1B"], + "South West Victoria REZ Option 1C": ["SWV1", "Option 1C"], + "South West Victoria REZ Option 2A": ["SWV1", "Option 2A"], + "South West Victoria REZ Option 2B": ["SWV1", "Option 2B"], + "South West Victoria REZ Option 3A": ["SWV1", "Option 3A"], + "South West Victoria REZ Option 3B": ["SWV1", "Option 3B"], +} + +_PREPATORY_ACTIVITIES_OPTION_NAME_TO_FLOW_PATH = { + "NNSW–SQ Option 5": "NNSW-SQ", + "NNSW–SQ Option 1": "NNSW-SQ", + "NNSW–SQ Option 2": "NNSW-SQ", + "CNSW-SNW Option 2": "CNSW-SNW", + "CQ-GG Option 1": "CQ-GG", +} + +_ACTIONABLE_ISP_PROJECTS_NAME_TO_OPTION_NAME = { + "Humelink": "SNSW-CNSW Option 1 (HumeLink)", + "VNI West": "VIC-SNSW Option 1 - VNI 
West (Kerang)", + "Project Marinus Stage 1": "TAS-VIC Option 1 (Project Marinus Stage 1)", + "Project Marinus Stage 2": "TAS-VIC Option 2 (Project Marinus Stage 2)", +} + +_ACTIONABLE_ISP_PROJECTS_OPTION_NAME_TO_FLOW_PATH = { + "SNSW-CNSW Option 1 (HumeLink)": "SNSW-CNSW", + "VIC-SNSW Option 1 - VNI West (Kerang)": "VIC-SNSW", + "TAS-VIC Option 1 (Project Marinus Stage 1)": "TAS-VIC", + "TAS-VIC Option 2 (Project Marinus Stage 2)": "TAS-VIC", +} + +# Transmission cost processing configurations +_FLOW_PATH_CONFIG = { + "transmission_type": "flow_path", + "in_coming_column_mappings": { + "Flow path": "id", + "Flow Path": "id", + "Option Name": "option", + "Option": "option", + "Notional transfer level increase (MW) Note: Same increase applies to all transfer limit conditions (Peak demand, Summer typical and Winter reference)_Forward direction": "forward_capacity_increase", + "Notional transfer level increase (MW) Note: Same increase applies to all transfer limit conditions (Peak demand, Summer typical and Winter reference)_Reverse direction": "reverse_capacity_increase", + }, + "out_going_column_mappings": { + "id": "flow_path", + "nominal_capacity_increase": "additional_network_capacity_mw", + }, + "table_names": { + "augmentation": _FLOW_PATH_AGUMENTATION_TABLES, + "cost": { + "progressive_change": _FLOW_PATH_AUGMENTATION_COST_TABLES_PROGRESSIVE_CHANGE, + "step_change_and_green_energy_exports": _FLOW_PATH_AUGMENTATION_COST_TABLES_STEP_CHANGE_AND_GREEN_ENERGY_EXPORTS, + }, + "prep_activities": _PREPATORY_ACTIVITIES_TABLES, + "actionable_projects": _ACTIONABLE_ISP_PROJECTS_TABLES, + }, + "mappings": { + "prep_activities_name_to_option": _PREPATORY_ACTIVITIES_NAME_TO_OPTION_NAME, + "option_to_id": _PREPATORY_ACTIVITIES_OPTION_NAME_TO_FLOW_PATH, + "actionable_name_to_option": _ACTIONABLE_ISP_PROJECTS_NAME_TO_OPTION_NAME, + "actionable_option_to_id": _ACTIONABLE_ISP_PROJECTS_OPTION_NAME_TO_FLOW_PATH, + }, +} + +_REZ_CONFIG = { + "transmission_type": "rez", + "in_coming_column_mappings": { + "REZ constraint ID": "id", + "REZ / Constraint ID": "id", + "Option": "option", + "REZ": "rez", + "REZ Name": "rez", + "Additional network capacity (MW)": "nominal_capacity_increase", + }, + "out_going_column_mappings": { + "id": "rez_constraint_id", + "nominal_capacity_increase": "additional_network_capacity_mw", + }, + "table_names": { + "augmentation": _REZ_CONNECTION_AGUMENTATION_TABLES, + "cost": { + "progressive_change": _REZ_AUGMENTATION_COST_TABLES_PROGRESSIVE_CHANGE, + "step_change_and_green_energy_exports": _REZ_AUGMENTATION_COST_TABLES_STEP_CHANGE_AND_GREEN_ENERGY_EXPORTS, + }, + "prep_activities": _REZ_CONNECTION_PREPATORY_ACTIVITIES_TABLES, + }, + "prep_activities_mapping": _REZ_PREPATORY_ACTIVITIES_NAME_TO_REZ_AND_OPTION_NAME, +} diff --git a/src/ispypsa/templater/renewable_energy_zones.py b/src/ispypsa/templater/renewable_energy_zones.py index 179798d..69302c0 100644 --- a/src/ispypsa/templater/renewable_energy_zones.py +++ b/src/ispypsa/templater/renewable_energy_zones.py @@ -39,15 +39,10 @@ def _template_rez_build_limits( rez_build_limits[col] = pd.to_numeric(rez_build_limits[col], errors="coerce") cols_where_zero_goes_to_nan = [ "rez_resource_limit_violation_penalty_factor_$m/mw", - "indicative_transmission_expansion_cost_$m/mw", - "indicative_transmission_expansion_cost_$m/mw_tranche_2", - "indicative_transmission_expansion_cost_$m/mw_tranche_3", ] for col in cols_where_zero_goes_to_nan: rez_build_limits.loc[rez_build_limits[col] == 0.0, col] = np.nan - rez_build_limits = 
_combine_transmission_expansion_cost_to_one_column( - rez_build_limits - ) + rez_build_limits = _process_transmission_limit(rez_build_limits) cols_where_nan_goes_to_zero = [ "wind_generation_total_limits_mw_high", @@ -61,20 +56,18 @@ def _template_rez_build_limits( rez_build_limits = _convert_cost_units( rez_build_limits, "rez_resource_limit_violation_penalty_factor_$m/mw" ) - rez_build_limits = _convert_cost_units( - rez_build_limits, "indicative_transmission_expansion_cost_$m/mw" - ) rez_build_limits = rez_build_limits.rename( columns={ - "indicative_transmission_expansion_cost_$m/mw": "indicative_transmission_expansion_cost_$/mw", "rez_resource_limit_violation_penalty_factor_$m/mw": "rez_solar_resource_limit_violation_penalty_factor_$/mw", } ) + rez_build_limits["carrier"] = "AC" rez_build_limits = rez_build_limits.loc[ :, [ "rez_id", "isp_sub_region_id", + "carrier", "wind_generation_total_limits_mw_high", "wind_generation_total_limits_mw_medium", "wind_generation_total_limits_mw_offshore_floating", @@ -86,7 +79,6 @@ def _template_rez_build_limits( "rez_transmission_network_limit_summer_typical", # Remove while not being used. # "rez_transmission_network_limit_winter_reference", - "indicative_transmission_expansion_cost_$/mw", ], ] return rez_build_limits diff --git a/src/ispypsa/translator/create_pypsa_friendly_inputs.py b/src/ispypsa/translator/create_pypsa_friendly_inputs.py index 7667cff..181e30d 100644 --- a/src/ispypsa/translator/create_pypsa_friendly_inputs.py +++ b/src/ispypsa/translator/create_pypsa_friendly_inputs.py @@ -15,34 +15,27 @@ create_pypsa_friendly_bus_demand_timeseries, ) from ispypsa.translator.custom_constraints import ( - _translate_custom_constraint_lhs, - _translate_custom_constraint_rhs, - _translate_custom_constraints_generators, + _translate_custom_constraints, ) from ispypsa.translator.generators import ( _create_unserved_energy_generators, _translate_ecaa_generators, create_pypsa_friendly_existing_generator_timeseries, ) -from ispypsa.translator.lines import _translate_flow_paths_to_lines -from ispypsa.translator.mappings import ( - _CUSTOM_CONSTRAINT_EXPANSION_COSTS, - _CUSTOM_CONSTRAINT_LHS_TABLES, - _CUSTOM_CONSTRAINT_RHS_TABLES, -) +from ispypsa.translator.links import _translate_flow_paths_to_links from ispypsa.translator.renewable_energy_zones import ( - _translate_renewable_energy_zone_build_limits_to_flow_paths, + _translate_renewable_energy_zone_build_limits_to_links, ) from ispypsa.translator.snapshots import ( _create_investment_period_weightings, create_pypsa_friendly_snapshots, ) -_BASE_TRANSLATOR_OUPUTS = [ +_BASE_TRANSLATOR_OUTPUTS = [ "snapshots", "investment_period_weights", "buses", - "lines", + "links", "generators", "custom_constraints_lhs", "custom_constraints_rhs", @@ -101,7 +94,7 @@ def create_pypsa_friendly_inputs( ) buses = [] - lines = [] + links = [] if config.network.nodes.regional_granularity == "sub_regions": buses.append(_translate_isp_sub_regions_to_buses(ispypsa_tables["sub_regions"])) @@ -122,55 +115,26 @@ def create_pypsa_friendly_inputs( if config.network.nodes.rezs == "discrete_nodes": buses.append(_translate_rezs_to_buses(ispypsa_tables["renewable_energy_zones"])) - lines.append( - _translate_renewable_energy_zone_build_limits_to_flow_paths( + links.append( + _translate_renewable_energy_zone_build_limits_to_links( ispypsa_tables["renewable_energy_zones"], - config.network.rez_transmission_expansion, - config.wacc, - config.network.annuitisation_lifetime, - config.network.rez_to_sub_region_transmission_default_limit, + 
ispypsa_tables["rez_transmission_expansion_costs"], + config, ) ) if config.network.nodes.regional_granularity != "single_region": - lines.append( - _translate_flow_paths_to_lines( - ispypsa_tables["flow_paths"], - config.network.transmission_expansion, - config.wacc, - config.network.annuitisation_lifetime, - ) - ) + links.append(_translate_flow_paths_to_links(ispypsa_tables, config)) pypsa_inputs["buses"] = pd.concat(buses) - if len(lines) > 0: - pypsa_inputs["lines"] = pd.concat(lines) + if len(links) > 0: + pypsa_inputs["links"] = pd.concat(links) else: - pypsa_inputs["lines"] = pd.DataFrame() + pypsa_inputs["links"] = pd.DataFrame() - custom_constraint_lhs_tables = [ - ispypsa_tables[table] for table in _CUSTOM_CONSTRAINT_LHS_TABLES - ] - pypsa_inputs["custom_constraints_lhs"] = _translate_custom_constraint_lhs( - custom_constraint_lhs_tables - ) - custom_constraint_rhs_tables = [ - ispypsa_tables[table] for table in _CUSTOM_CONSTRAINT_RHS_TABLES - ] - pypsa_inputs["custom_constraints_rhs"] = _translate_custom_constraint_rhs( - custom_constraint_rhs_tables - ) - custom_constraint_generators = [ - ispypsa_tables[table] for table in _CUSTOM_CONSTRAINT_EXPANSION_COSTS - ] - pypsa_inputs["custom_constraints_generators"] = ( - _translate_custom_constraints_generators( - custom_constraint_generators, - config.network.rez_transmission_expansion, - config.wacc, - config.network.annuitisation_lifetime, - ) + pypsa_inputs.update( + _translate_custom_constraints(config, ispypsa_tables, pypsa_inputs["links"]) ) return pypsa_inputs @@ -291,7 +255,7 @@ def create_pypsa_friendly_timeseries_inputs( def list_translator_output_files(output_path: Path | None = None) -> list[Path]: - files = _BASE_TRANSLATOR_OUPUTS + files = _BASE_TRANSLATOR_OUTPUTS if output_path is not None: files = [output_path / Path(file + ".csv") for file in files] return files diff --git a/src/ispypsa/translator/custom_constraints.py b/src/ispypsa/translator/custom_constraints.py index 4912498..68504dc 100644 --- a/src/ispypsa/translator/custom_constraints.py +++ b/src/ispypsa/translator/custom_constraints.py @@ -1,133 +1,668 @@ -from pathlib import Path - +import numpy as np import pandas as pd -from ispypsa.translator.helpers import _annuitised_investment_costs +from ispypsa.config import ( + ModelConfig, +) +from ispypsa.translator.links import _translate_time_varying_expansion_costs from ispypsa.translator.mappings import ( _CUSTOM_CONSTRAINT_ATTRIBUTES, - _CUSTOM_CONSTRAINT_EXPANSION_COSTS, - _CUSTOM_CONSTRAINT_LHS_TABLES, - _CUSTOM_CONSTRAINT_RHS_TABLES, _CUSTOM_CONSTRAINT_TERM_TYPE_TO_ATTRIBUTE_TYPE, _CUSTOM_CONSTRAINT_TERM_TYPE_TO_COMPONENT_TYPE, ) -def _combine_custom_constraints_tables(custom_constraint_tables: list[pd.DataFrame]): - """Combines a set of custom constraint data tables into a single data table, - renaming the columns so that they are consistent. +def _translate_custom_constraints( + config: ModelConfig, + ispypsa_tables: dict[str, pd.DataFrame], + links: pd.DataFrame, +): + """Translate custom constraint tables into a PyPSA friendly format and define any + endogenous custom constraints. Args: - custom_constraint_tables: list of pd.DataFrames specifying custom constraint - details - Returns: pd.DataFrame + config: `ispypsa.config.ModelConfig` object + ispypsa_tables: dictionary of dataframes providing the `ISPyPSA` input tables. 
+ The relevant tables for this function are: + - custom_constraints_rhs + - custom_constraints_lhs + links: pd.DataFrame specifying the Link components to be used in the PyPSA model + + Returns: dictionary of dataframes in the `PyPSA` friendly format, with the relevant + tables for custom constraints. New tables are: + - custom_constraints_rhs (optional) + - custom_constraints_lhs (optional) + - custom_constraint_generators (optional) + """ - combined_data = [] - for table in custom_constraint_tables: - table = table.rename(columns=_CUSTOM_CONSTRAINT_ATTRIBUTES) - cols_to_keep = [ - col - for col in table.columns - if col in _CUSTOM_CONSTRAINT_ATTRIBUTES.values() - ] - table = table.loc[:, cols_to_keep] - combined_data.append(table) - combined_data = pd.concat(combined_data) - return combined_data + + pypsa_inputs = {} + lhs = [] + rhs = [] + + # Process manual custom constraints + manual_lhs, manual_rhs, generators = _process_manual_custom_constraints( + config, ispypsa_tables, links + ) + + _append_if_not_empty(lhs, manual_lhs) + _append_if_not_empty(rhs, manual_rhs) + + if generators is not None: + pypsa_inputs["custom_constraints_generators"] = generators + + # Process transmission expansion limit constraints + transmission_expansion_limit_lhs, transmission_expansion_limit_rhs = ( + _create_expansion_limit_constraints( + links, + pypsa_inputs.get("custom_constraints_generators"), + ispypsa_tables.get("flow_path_expansion_costs"), + ispypsa_tables.get("rez_transmission_expansion_costs"), + ) + ) + _append_if_not_empty(lhs, transmission_expansion_limit_lhs) + _append_if_not_empty(rhs, transmission_expansion_limit_rhs) + + # Concatenate all constraints + if lhs and rhs: + pypsa_inputs["custom_constraints_lhs"] = pd.concat(lhs) + pypsa_inputs["custom_constraints_rhs"] = pd.concat(rhs) + + # Check for duplicate constraint names in RHS + _check_duplicate_constraint_names(pypsa_inputs["custom_constraints_rhs"]) + + # Validate that all constraints have matching LHS and RHS + _validate_lhs_rhs_constraints( + pypsa_inputs["custom_constraints_lhs"], + pypsa_inputs["custom_constraints_rhs"], + ) + + return pypsa_inputs + + +def _append_if_not_empty( + target_list: list[pd.DataFrame], + dataframe: pd.DataFrame, +) -> None: + """Append dataframe to list if it's not empty. + + Args: + target_list: List to append to + dataframe: DataFrame to potentially append + """ + if not dataframe.empty: + target_list.append(dataframe) + + +def _process_manual_custom_constraints( + config: ModelConfig, + ispypsa_tables: dict[str, pd.DataFrame], + links: pd.DataFrame, +) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame | None]: + """Process manually specified custom constraints + + Args: + config: `ispypsa.config.ModelConfig` object + ispypsa_tables: dictionary of dataframes providing the `ISPyPSA` input tables. 
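The two validation helpers invoked at the end of `_translate_custom_constraints` above reduce to two simple checks on the combined RHS and LHS tables. A minimal sketch, with invented constraint names:

```python
import pandas as pd

# Invented example data: two constraints, each with LHS terms and one RHS row.
lhs = pd.DataFrame({"constraint_name": ["SEVIC1", "SEVIC1", "SWQLD1"]})
rhs = pd.DataFrame({"constraint_name": ["SEVIC1", "SWQLD1"], "rhs": [6000, 5300]})

# Check 1: no constraint may appear twice in the RHS table.
assert not rhs.duplicated(subset=["constraint_name"]).any()

# Check 2: every LHS constraint needs an RHS definition and vice versa.
assert set(lhs["constraint_name"]) == set(rhs["constraint_name"])
```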
+ links: pd.DataFrame specifying the Link components to be used in the PyPSA model + + Returns: + Tuple of (lhs, rhs, generators) where: + - lhs: DataFrame with left-hand side constraint definitions + - rhs: DataFrame with right-hand side constraint values + - generators: DataFrame with custom constraint generators (None if REZ expansion is disabled) + """ + + if not _has_manual_custom_constraints(ispypsa_tables): + return pd.DataFrame(), pd.DataFrame(), None + + lhs = _translate_custom_constraint_lhs(ispypsa_tables["custom_constraints_lhs"]) + lhs = _expand_link_flow_lhs_terms(lhs, links) + rhs = _translate_custom_constraint_rhs(ispypsa_tables["custom_constraints_rhs"]) + + generators = None + + # Create dummy generators for REZ transmission expansion + if config.network.rez_transmission_expansion: + generators = _translate_custom_constraints_generators( + list(rhs["constraint_name"]), + ispypsa_tables["rez_transmission_expansion_costs"], + config.wacc, + config.network.annuitisation_lifetime, + config.temporal.capacity_expansion.investment_periods, + config.temporal.year_type, + ) + + # Add generator constraints to LHS + custom_constraint_generators_lhs = ( + _translate_custom_constraint_generators_to_lhs(generators) + ) + + lhs = pd.concat([lhs, custom_constraint_generators_lhs]) + + return lhs, rhs, generators + + +def _has_manual_custom_constraints( + ispypsa_tables: dict[str, pd.DataFrame], +) -> bool: + """Check if manual custom constraint tables are present and not empty. + + Args: + ispypsa_tables: dictionary of dataframes providing the `ISPyPSA` input tables. + + Returns: + True if both custom_constraints_lhs and custom_constraints_rhs exist and are not empty + """ + lhs_exists = ( + "custom_constraints_lhs" in ispypsa_tables + and ispypsa_tables["custom_constraints_lhs"] is not None + and not ispypsa_tables["custom_constraints_lhs"].empty + ) + + rhs_exists = ( + "custom_constraints_rhs" in ispypsa_tables + and ispypsa_tables["custom_constraints_rhs"] is not None + and not ispypsa_tables["custom_constraints_rhs"].empty + ) + + if lhs_exists and rhs_exists: + return True + elif not lhs_exists and not rhs_exists: + return False + else: + raise ValueError("Incomplete manual custom constraints tables provided.") def _translate_custom_constraints_generators( - custom_constraint_generators: list[pd.DataFrame], - expansion_on: bool, + custom_constraints: list[str], + rez_expansion_costs: pd.DataFrame, wacc: float, asset_lifetime: int, + investment_periods: list[int], + year_type: str, ) -> pd.DataFrame: - """Combines all tables specifying the expansion costs of custom constraint - rhs values into a single pd.Dataframe formatting the data so the rhs - can be represented by PyPSA generator components. PyPSA can then invest in - additional capacity for the generators which is used in the custom constraints - to represent additional transmission capacity. - - Args: - custom_constraint_generators: list of pd.DataFrames in `ISPyPSA` detailing - custom constraint generator expansion costs. - expansion_on: bool indicating if transmission line expansion is considered. - wacc: float, as fraction, indicating the weighted average coast of capital for - transmission line investment, for the purposes of annuitising capital - costs. - asset_lifetime: int specifying the nominal asset lifetime in years or the - purposes of annuitising capital costs. + """Translates REZ network expansion data into custom generators for modelling + rez constraint relaxation. 
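The dummy-generator mechanism can be illustrated with a minimal, self-contained sketch. The constraint names, years and cost values here are invented; the `<constraint>_exp_<year>` naming pattern, the shared `bus_for_custom_constraint_gens` bus and the zero-capacity extendable generators follow the code below:

```python
import numpy as np
import pandas as pd

# Invented example: one relaxable constraint with annuitised costs for two
# investment years.
costs = pd.DataFrame(
    {
        "constraint_name": ["SWQLD1", "SWQLD1"],
        "build_year": [2026, 2028],
        "capital_cost": [5200.0, 5350.0],  # $/MW/yr after annuitisation
    }
)

# One zero-capacity, extendable generator per constraint and build year.
# PyPSA can invest in these generators, and their p_nom enters the custom
# constraint LHS (with coefficient -1) to relax the limit.
generators = costs.assign(
    name=costs["constraint_name"] + "_exp_" + costs["build_year"].astype(str),
    isp_name=costs["constraint_name"],
    bus="bus_for_custom_constraint_gens",
    p_nom=0.0,
    p_nom_extendable=True,
    lifetime=np.inf,
)
print(generators[["name", "bus", "p_nom", "p_nom_extendable", "build_year"]])
```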
+ + Args: + custom_constraints: list of custom constraints to create expansion generators + for. + rez_expansion_costs: pd.DataFrame with time-varying expansion costs. + wacc: float indicating the weighted average coast of capital. + asset_lifetime: int specifying the nominal asset lifetime in years. + investment_periods: list of investment years for time-varying costs. + year_type: temporal configuration ("fy" or "calendar") for time-varying costs. Returns: pd.DataFrame """ - custom_constraint_generators = _combine_custom_constraints_tables( - custom_constraint_generators + rez_expansion_costs = rez_expansion_costs[ + rez_expansion_costs["rez_constraint_id"].isin(custom_constraints) + ] + + expansion_generators = _translate_time_varying_expansion_costs( + expansion_costs=rez_expansion_costs, + cost_column_suffix="_$/mw", + investment_periods=investment_periods, + year_type=year_type, + wacc=wacc, + asset_lifetime=asset_lifetime, ) - custom_constraint_generators = custom_constraint_generators.rename( - columns={"variable_name": "name"} + expansion_generators = expansion_generators.rename( + columns={ + "rez_constraint_id": "constraint_name", + "investment_year": "build_year", + } ) - custom_constraint_generators["bus"] = "bus_for_custom_constraint_gens" - custom_constraint_generators["p_nom"] = 0.0 + return _format_expansion_generators(expansion_generators) - # The generator size is only used for additional transmission capacity, so it - # initial size is 0.0. - custom_constraint_generators["capital_cost"] = custom_constraint_generators[ - "capital_cost" - ].apply(lambda x: _annuitised_investment_costs(x, wacc, asset_lifetime)) - # not extendable by default - custom_constraint_generators["p_nom_extendable"] = False - mask = ~custom_constraint_generators["capital_cost"].isna() - custom_constraint_generators.loc[mask, "p_nom_extendable"] = expansion_on +def _format_expansion_generators( + expansion_generators: pd.DataFrame, +) -> pd.DataFrame: + """Format expansion generators with required fields for PyPSA. + + Args: + expansion_generators: DataFrame with basic generator info - return custom_constraint_generators + Returns: + Formatted DataFrame with all required PyPSA generator columns + """ + expansion_generators["name"] = ( + expansion_generators["constraint_name"] + + "_exp_" + + expansion_generators["build_year"].astype(str) + ) + expansion_generators["isp_name"] = expansion_generators["constraint_name"] + expansion_generators["bus"] = "bus_for_custom_constraint_gens" + expansion_generators["p_nom"] = 0.0 + expansion_generators["p_nom_extendable"] = True + expansion_generators["lifetime"] = np.inf + + # Keep only the columns needed for PyPSA generators + expansion_cols = [ + "name", + "isp_name", + "bus", + "p_nom", + "p_nom_extendable", + "build_year", + "lifetime", + "capital_cost", + ] + return expansion_generators[expansion_cols].reset_index(drop=True) def _translate_custom_constraint_rhs( - custom_constraint_rhs_tables: list[pd.DataFrame], + custom_constraint_rhs_table: pd.DataFrame, ) -> pd.DataFrame: - """Combines all tables specifying the rhs values of custom constraints into a single - pd.Dataframe. + """Change RHS custom constraints to PyPSA style. Args: - custom_constraint_rhs_tables: list of pd.DataFrames in `ISPyPSA` detailing + custom_constraint_rhs_table: pd.DataFrame in `ISPyPSA` detailing custom constraints rhs values. 
Returns: pd.DataFrame """ - custom_constraint_rhs_values = _combine_custom_constraints_tables( - custom_constraint_rhs_tables - ) - return custom_constraint_rhs_values + return custom_constraint_rhs_table.rename(columns=_CUSTOM_CONSTRAINT_ATTRIBUTES) def _translate_custom_constraint_lhs( - custom_constraint_lhs_tables: list[pd.DataFrame], + custom_constraint_lhs_table: pd.DataFrame, ) -> pd.DataFrame: - """Combines all tables specifying the lhs values of custom constraints into a single - pd.Dataframe. + """Change RHS custom constraints to PyPSA style. Args: - custom_constraint_lhs_tables: list of pd.DataFrames in `ISPyPSA` detailing + custom_constraint_lhs_table: list of pd.DataFrames in `ISPyPSA` detailing custom constraints lhs values. Returns: pd.DataFrame """ - custom_constraint_lhs_values = _combine_custom_constraints_tables( - custom_constraint_lhs_tables + custom_constraint_lhs_values = custom_constraint_lhs_table.rename( + columns=_CUSTOM_CONSTRAINT_ATTRIBUTES + ) + return _add_component_and_attribute_columns(custom_constraint_lhs_values) + + +def _add_component_and_attribute_columns( + lhs_values: pd.DataFrame, +) -> pd.DataFrame: + """Add component and attribute columns based on term_type and remove term_type. + + Args: + lhs_values: DataFrame with term_type column + + Returns: + DataFrame with component and attribute columns, term_type removed + """ + lhs_values["component"] = lhs_values["term_type"].map( + _CUSTOM_CONSTRAINT_TERM_TYPE_TO_COMPONENT_TYPE + ) + lhs_values["attribute"] = lhs_values["term_type"].map( + _CUSTOM_CONSTRAINT_TERM_TYPE_TO_ATTRIBUTE_TYPE + ) + return lhs_values.drop(columns="term_type") + + +def _translate_custom_constraint_generators_to_lhs( + custom_constraint_generators: pd.DataFrame, +) -> pd.DataFrame: + """Create the lhs definitions to match the generators used to relax custom + constraints + + Args: + custom_constraint_generators: pd.DataFrames detailing the + custom constraint generators + + Returns: pd.DataFrame + """ + custom_constraint_generators = custom_constraint_generators.rename( + columns={"isp_name": "constraint_name", "name": "variable_name"} ) + custom_constraint_generators["component"] = "Generator" + custom_constraint_generators["attribute"] = "p_nom" + custom_constraint_generators["coefficient"] = -1.0 + col_order = [ + "constraint_name", + "variable_name", + "component", + "attribute", + "coefficient", + ] + return custom_constraint_generators.loc[:, col_order] - custom_constraint_lhs_values["component"] = custom_constraint_lhs_values[ - "term_type" - ].map(_CUSTOM_CONSTRAINT_TERM_TYPE_TO_COMPONENT_TYPE) - custom_constraint_lhs_values["attribute"] = custom_constraint_lhs_values[ - "term_type" - ].map(_CUSTOM_CONSTRAINT_TERM_TYPE_TO_ATTRIBUTE_TYPE) +def _expand_link_flow_lhs_terms( + custom_constraint_lhs: pd.DataFrame, + links: pd.DataFrame, +) -> pd.DataFrame: + """Create lhs terms for each existing link component and each expansion option + link component. + + Args: + custom_constraint_lhs: pd.DataFrame specifying lhs terms of custom + constraints in PyPSA friendly format. + links: pd.DataFrame detailing the PyPSA links to be included in the model. - custom_constraint_lhs_values = custom_constraint_lhs_values.drop( - columns="term_type" + Returns: pd.DataFrame specifying lhs terms of custom + constraints in PyPSA friendly format. + + Raises: + ValueError: If any link_flow terms reference links that don't exist. 
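The fan-out performed in the function body below can be sketched with invented link and constraint names: a single `link_flow` term written against an ISP flow path name becomes one LHS row per link that shares that `isp_name`, so the constraint applies to the total flow across existing and expansion links.

```python
import pandas as pd

# One manual LHS term written against the ISP flow path name (values invented).
lhs = pd.DataFrame(
    {
        "constraint_name": ["SWQLD1"],
        "variable_name": ["NNSW-SQ"],
        "coefficient": [1.5],
        "component": ["Link"],
        "attribute": ["p"],
    }
)

# The links table carries the existing link and its expansion options,
# all sharing the same isp_name.
links = pd.DataFrame(
    {
        "isp_name": ["NNSW-SQ", "NNSW-SQ", "NNSW-SQ"],
        "name": ["NNSW-SQ_existing", "NNSW-SQ_exp_2026", "NNSW-SQ_exp_2028"],
    }
)

# Merging on isp_name yields one LHS row per link component.
expanded = (
    lhs.merge(links, left_on="variable_name", right_on="isp_name")
    .drop(columns=["isp_name", "variable_name"])
    .rename(columns={"name": "variable_name"})
)
print(expanded)
```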
+ """ + link_flow_mask = (custom_constraint_lhs["component"] == "Link") & ( + custom_constraint_lhs["attribute"] == "p" ) - return custom_constraint_lhs_values + link_flow_terms = custom_constraint_lhs[link_flow_mask] + non_link_flow_terms = custom_constraint_lhs[~link_flow_mask] + + # Check for unmatched link_flow terms + if not link_flow_terms.empty: + unique_link_flow_names = set(link_flow_terms["variable_name"]) + + # Handle None or empty links DataFrame + if links is None or links.empty: + unique_link_isp_names = set() + else: + unique_link_isp_names = set(links["isp_name"]) + + unmatched_links = unique_link_flow_names - unique_link_isp_names + + if unmatched_links: + raise ValueError( + f"The following link_flow terms reference links that don't exist: " + f"{sorted(unmatched_links)}" + ) + + all_lhs_terms = [non_link_flow_terms] + + # Only perform merge if there are link_flow terms and links is not None/empty + if not link_flow_terms.empty and links is not None and not links.empty: + link_flow_terms = pd.merge( + link_flow_terms, + links.loc[:, ["isp_name", "name"]], + left_on="variable_name", + right_on="isp_name", + ) + link_flow_terms = link_flow_terms.drop(columns=["isp_name", "variable_name"]) + link_flow_terms = link_flow_terms.rename(columns={"name": "variable_name"}) + all_lhs_terms.append(link_flow_terms) + + return pd.concat(all_lhs_terms) + + +def _create_expansion_limit_constraints( + links, + constraint_generators, + flow_paths, + rez_connections, +): + """Create custom constraint lhs and rhs definitions to limit the total expansion + on rez and flow links + + Args: + links: DataFrame specifying the Link components to be added to the PyPSA model. + constraint_generators: DataFrame specifying the generators used to model + rez connection capacity. + flow_paths: DataFrame specifying the total additional + capacity allowed on flow paths. + rez_connections: DataFrame specifying the total + additional capacity allowed on flow paths. + + Returns: Two DataFrames, the first specifying the lhs values of the custom + constraints created, and the second the rhs values. + """ + lhs_parts = [] + links_lhs = _create_expansion_limit_lhs_for_links(links) + _append_if_not_empty(lhs_parts, links_lhs) + generators_lhs = _create_expansion_limit_lhs_for_generators(constraint_generators) + _append_if_not_empty(lhs_parts, generators_lhs) + lhs = _finalize_expansion_limit_lhs(lhs_parts) + + rhs_parts = [] + flow_path_rhs = _process_rhs_components(flow_paths) + _append_if_not_empty(rhs_parts, flow_path_rhs) + rez_connections = _get_isp_names_for_rez_connection(rez_connections, links) + rez_rhs = _process_rhs_components(rez_connections) + _append_if_not_empty(rhs_parts, rez_rhs) + rhs = _finalize_expansion_limit_rhs(rhs_parts) + rhs = _filter_rhs_by_lhs_constraints(rhs, lhs) + + return lhs, rhs + + +def _get_isp_names_for_rez_connection( + rez_connections: pd.DataFrame, + links: pd.DataFrame, +) -> pd.DataFrame: + """Update the rez_connection limits to use names that match with links.""" + + if ( + rez_connections is not None + and not rez_connections.empty + and links is not None + and not links.empty + ): + # Merge in the isp flow path names. + rez_connections = pd.merge( + rez_connections, + links.groupby("isp_name", as_index=False).first(), + how="left", + left_on="rez_constraint_id", + right_on="bus0", + ) + + # If there aren't matching links that means the costs are for transmission + # constraints modelled with dummy generators and the rez_constraint_id name should + # be kept. 
+ rez_connections.loc[rez_connections["isp_name"].isna(), "isp_name"] = ( + rez_connections["rez_constraint_id"] + ) + rez_connections = rez_connections.loc[ + :, ["isp_name", "additional_network_capacity_mw"] + ] + + return rez_connections + + +def _process_rhs_components( + rhs_components: pd.DataFrame | None, +) -> pd.DataFrame: + """Process flow path expansion costs into RHS constraint format. + + Args: + flow_paths: DataFrame with flow path expansion costs or None + + Returns: + DataFrame with processed flow path constraints, empty if input is None + + Raises: + ValueError: If required columns are missing after processing + """ + if rhs_components is None or rhs_components.empty: + return pd.DataFrame() + + rhs_cols = [ + col for col in rhs_components if col in _CUSTOM_CONSTRAINT_ATTRIBUTES.keys() + ] + + rhs_components = rhs_components.loc[:, rhs_cols] + result = rhs_components.rename(columns=_CUSTOM_CONSTRAINT_ATTRIBUTES) + + # Check that required columns are present after renaming + required_columns = {"constraint_name", "rhs"} + missing_columns = required_columns - set(result.columns) + if missing_columns: + raise ValueError( + f"RHS components missing required columns after processing: {sorted(missing_columns)}" + ) + + return result + + +def _create_expansion_limit_lhs_for_links( + links: pd.DataFrame | None, +) -> pd.DataFrame: + """Create constraints LHS for link expansion limits. + + Args: + links: DataFrame with link information + + Returns: + DataFrame with LHS constraint definitions for links + """ + if links is None or links.empty: + return pd.DataFrame() + + links = links.loc[links["p_nom_extendable"], ["isp_name", "name"]] + links = links.rename(columns=_CUSTOM_CONSTRAINT_ATTRIBUTES) + links["component"] = "Link" + links["attribute"] = "p_nom" + links["coefficient"] = 1.0 + return links + + +def _create_expansion_limit_lhs_for_generators( + constraint_generators: pd.DataFrame | None, +) -> pd.DataFrame: + """Create LHS constraints for generator expansion limits. + + Args: + constraint_generators: DataFrame with constraint generators or None + + Returns: + DataFrame with LHS constraint definitions for generators, empty if input is None + """ + if constraint_generators is None or constraint_generators.empty: + return pd.DataFrame() + + generators_lhs = constraint_generators.loc[:, ["isp_name", "name"]] + generators_lhs = generators_lhs.rename(columns=_CUSTOM_CONSTRAINT_ATTRIBUTES) + generators_lhs["component"] = "Generator" + generators_lhs["attribute"] = "p_nom" + generators_lhs["coefficient"] = 1.0 + return generators_lhs + + +def _finalize_expansion_limit_rhs( + rhs_parts: list[pd.DataFrame], +) -> pd.DataFrame: + """Combine and finalize RHS constraint parts. + + Args: + rhs_parts: List of DataFrames with RHS constraint definitions + + Returns: + Combined DataFrame with finalized RHS constraints, empty if no parts + """ + if not rhs_parts: + return pd.DataFrame() + + rhs = pd.concat(rhs_parts) + rhs["constraint_name"] = rhs["constraint_name"] + "_expansion_limit" + rhs["constraint_type"] = "<=" + return rhs + + +def _finalize_expansion_limit_lhs( + lhs_parts: list[pd.DataFrame], +) -> pd.DataFrame: + """Combine and finalize LHS constraint parts. 
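Putting the expansion-limit helpers above together, the endogenous constraint for one flow path looks like the following compact sketch (capacities and names invented; the `_expansion_limit` suffix, the `<=` type and the coefficient of 1 on each extendable link's `p_nom` follow the code):

```python
import pandas as pd

# Extendable expansion links, grouped by their ISP flow path name (invented).
links = pd.DataFrame(
    {
        "isp_name": ["CNSW-NNSW", "CNSW-NNSW"],
        "name": ["CNSW-NNSW_exp_2026", "CNSW-NNSW_exp_2028"],
        "p_nom_extendable": [True, True],
    }
)

# LHS: each extendable link's p_nom contributes with coefficient 1.
lhs = links.loc[links["p_nom_extendable"], ["isp_name", "name"]].rename(
    columns={"isp_name": "constraint_name", "name": "variable_name"}
)
lhs["component"] = "Link"
lhs["attribute"] = "p_nom"
lhs["coefficient"] = 1.0
lhs["constraint_name"] = lhs["constraint_name"] + "_expansion_limit"

# RHS: the total extra capacity allowed on the flow path (value invented).
rhs = pd.DataFrame(
    {
        "constraint_name": ["CNSW-NNSW_expansion_limit"],
        "rhs": [500.0],
        "constraint_type": ["<="],
    }
)
print(lhs, rhs, sep="\n\n")
```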
+ + Args: + lhs_parts: List of DataFrames with LHS constraint definitions + + Returns: + Combined DataFrame with finalized LHS constraints, empty if no parts + """ + if not lhs_parts: + return pd.DataFrame() + + lhs = pd.concat(lhs_parts) + lhs["constraint_name"] = lhs["constraint_name"] + "_expansion_limit" + return lhs + + +def _filter_rhs_by_lhs_constraints( + rhs: pd.DataFrame, + lhs: pd.DataFrame, +) -> pd.DataFrame: + """Filter RHS to only include constraints that have corresponding LHS. + + Args: + rhs: DataFrame with RHS constraint values + lhs: DataFrame with LHS constraint definitions + + Returns: + Filtered RHS DataFrame + """ + if lhs.empty or rhs.empty: + return pd.DataFrame() + return rhs[rhs["constraint_name"].isin(lhs["constraint_name"])] + + +def _check_duplicate_constraint_names( + rhs: pd.DataFrame, +) -> None: + """Check for duplicate constraint names in RHS. + + Args: + rhs: DataFrame with RHS constraint values + + Raises: + ValueError: If duplicate constraint names are found + """ + if rhs.empty: + return + + duplicates = rhs[rhs.duplicated(subset=["constraint_name"], keep=False)] + if not duplicates.empty: + duplicate_names = sorted(duplicates["constraint_name"].unique()) + raise ValueError( + f"Duplicate constraint names found in custom constraints RHS: {duplicate_names}" + ) + + +def _validate_lhs_rhs_constraints( + lhs: pd.DataFrame, + rhs: pd.DataFrame, +) -> None: + """Validate that all LHS constraints have RHS definitions and vice versa. + + Args: + lhs: DataFrame with LHS constraint definitions + rhs: DataFrame with RHS constraint values + + Raises: + ValueError: If there are mismatched constraints + """ + if lhs.empty and rhs.empty: + return + + if not lhs.empty: + lhs_constraint_names = set(lhs["constraint_name"].unique()) + else: + lhs_constraint_names = set() + + if not rhs.empty: + rhs_constraint_names = set(rhs["constraint_name"].unique()) + else: + rhs_constraint_names = set() + + # Check for LHS without RHS + lhs_without_rhs = lhs_constraint_names - rhs_constraint_names + if lhs_without_rhs: + raise ValueError( + f"The following LHS constraints do not have corresponding RHS definitions: " + f"{sorted(lhs_without_rhs)}" + ) + + # Check for RHS without LHS + rhs_without_lhs = rhs_constraint_names - lhs_constraint_names + if rhs_without_lhs: + raise ValueError( + f"The following RHS constraints do not have corresponding LHS definitions: " + f"{sorted(rhs_without_lhs)}" + ) diff --git a/src/ispypsa/translator/lines.py b/src/ispypsa/translator/lines.py deleted file mode 100644 index 48d6444..0000000 --- a/src/ispypsa/translator/lines.py +++ /dev/null @@ -1,42 +0,0 @@ -from pathlib import Path - -import pandas as pd - -from ispypsa.translator.helpers import _annuitised_investment_costs -from ispypsa.translator.mappings import _LINE_ATTRIBUTES - - -def _translate_flow_paths_to_lines( - flow_paths: pd.DataFrame, - expansion_on: bool, - wacc: float, - asset_lifetime: int, -) -> pd.DataFrame: - """Process network line data into a format aligned with PyPSA inputs. - - Args: - flow_paths: `ISPyPSA` formatted pd.DataFrame detailing flow path capabilities - between regions or sub regions depending on granularity. - expansion_on: bool indicating if transmission line expansion is considered. - wacc: float, as fraction, indicating the weighted average coast of capital for - transmission line investment, for the purposes of annuitising capital - costs. - asset_lifetime: int specifying the nominal asset lifetime in years or the - purposes of annuitising capital costs. 
- - Returns: - `pd.DataFrame`: PyPSA style generator attributes in tabular format. - """ - lines = flow_paths.loc[:, _LINE_ATTRIBUTES.keys()] - lines = lines.rename(columns=_LINE_ATTRIBUTES) - - lines["capital_cost"] = lines["capital_cost"].apply( - lambda x: _annuitised_investment_costs(x, wacc, asset_lifetime) - ) - - # not extendable by default - lines["s_nom_extendable"] = False - # If a non-nan capital_cost is given then set to extendable - lines.loc[~lines["capital_cost"].isna(), "s_nom_extendable"] = expansion_on - - return lines diff --git a/src/ispypsa/translator/links.py b/src/ispypsa/translator/links.py new file mode 100644 index 0000000..ec3b020 --- /dev/null +++ b/src/ispypsa/translator/links.py @@ -0,0 +1,249 @@ +import numpy as np +import pandas as pd + +from ispypsa.config import ModelConfig +from ispypsa.translator.helpers import _annuitised_investment_costs +from ispypsa.translator.mappings import _LINK_ATTRIBUTES + + +def _translate_flow_paths_to_links( + ispypsa_tables: dict[str, pd.DataFrame], + config: ModelConfig, +) -> pd.DataFrame: + """Process network line data into the PyPSA friendly format. + + Args: + ispypsa_tables: Dictionary of ISPyPSA DataFrames, expecting "flow_paths" + and "flow_path_expansion_costs". + config: Configuration object with temporal, WACC, and network lifetime settings. + + Returns: + pd.DataFrame: PyPSA style links attributes in tabular format, including both + existing links and potential expansion links. + """ + existing_flow_paths_df = ispypsa_tables["flow_paths"] + existing_links = _translate_existing_flow_path_capacity_to_links( + existing_flow_paths_df + ) + + if config.network.transmission_expansion: + expansion_links = _translate_expansion_costs_to_links( + ispypsa_tables["flow_path_expansion_costs"], + existing_links.copy(), + config.temporal.capacity_expansion.investment_periods, + config.temporal.year_type, + config.wacc, + config.network.annuitisation_lifetime, + "flow_path", + "isp_name", + ) + else: + expansion_links = pd.DataFrame() + + all_links = pd.concat( + [existing_links, expansion_links], ignore_index=True, sort=False + ) + + return all_links + + +def _translate_existing_flow_path_capacity_to_links( + existing_flow_paths: pd.DataFrame, +) -> pd.DataFrame: + """Translates existing flow path capacities to PyPSA link components. + + Args: + existing_flow_paths: DataFrame from ispypsa_tables["flow_paths"]. + + Returns: + `pd.DataFrame`: PyPSA style link attributes in tabular format. 
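The reverse-direction handling in the function body below can be checked with a toy example (capacities invented): a flow path with 1000 MW forward and 800 MW reverse capability becomes a link with `p_nom = 1000` and `p_min_pu = -0.8`, so flow is allowed down to -800 MW.

```python
import pandas as pd

flow_paths = pd.DataFrame(
    {
        "name": ["CNSW-NNSW"],
        "p_nom": [1000.0],         # forward_direction_mw_summer_typical
        "p_nom_reverse": [800.0],  # reverse_direction_mw_summer_typical
    }
)

# Forward capacity sets p_nom; the reverse limit is expressed as a fraction
# of p_nom via p_min_pu.
flow_paths["p_min_pu"] = -1.0 * (flow_paths["p_nom_reverse"] / flow_paths["p_nom"])
assert flow_paths.loc[0, "p_min_pu"] == -0.8
```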
+ """ + links_df = existing_flow_paths.loc[:, list(_LINK_ATTRIBUTES.keys())].copy() + links_df = links_df.rename(columns=_LINK_ATTRIBUTES) + + links_df["isp_name"] = links_df["name"].copy() + + links_df["name"] = links_df["name"] + "_existing" + links_df["p_nom_extendable"] = False + links_df["p_min_pu"] = -1.0 * (links_df["p_nom_reverse"] / links_df["p_nom"]) + links_df["build_year"] = 0 + links_df["lifetime"] = np.inf + links_df = links_df.drop(columns=["p_nom_reverse"]) + links_df["capital_cost"] = np.nan + + col_order = [ + "isp_name", + "name", + "carrier", + "bus0", + "bus1", + "p_nom", + "p_min_pu", + "build_year", + "lifetime", + "capital_cost", + "p_nom_extendable", + ] + + return links_df.loc[:, col_order] + + +def _translate_expansion_costs_to_links( + expansion_costs: pd.DataFrame, + existing_links_df: pd.DataFrame, + investment_periods: list[int], + year_type: str, + wacc: float, + asset_lifetime: int, + id_column: str = "flow_path", + match_column: str = "name", +) -> pd.DataFrame: + """Translates expansion costs to PyPSA link components. + + This function uses the generic _translate_time_varying_expansion_costs function + to process the expansion costs, then creates appropriate link components. + + Args: + expansion_costs: `ISPyPSA` formatted pd.DataFrame detailing + the expansion costs with financial year columns. + existing_links_df: `PyPSA` style link attributes in tabular format. + Used to source bus/carrier data. + investment_periods: List of investment years (e.g., [2025, 2030]). + year_type: Temporal configuration, e.g., "fy" or "calendar". + wacc: Weighted average cost of capital. + asset_lifetime: Nominal asset lifetime in years. + id_column: Column name in expansion_costs containing the identifier. + match_column: Column name in existing_links_df to match with id_column. + + Returns: + `pd.DataFrame`: PyPSA style link attributes in tabular format. + """ + # Use the generic function to process costs + processed_costs = _translate_time_varying_expansion_costs( + expansion_costs=expansion_costs, + cost_column_suffix="_$/mw", + investment_periods=investment_periods, + year_type=year_type, + wacc=wacc, + asset_lifetime=asset_lifetime, + ) + + if processed_costs.empty: + return pd.DataFrame() + + # Prepare for merging with existing links data + attributes_to_carry = ["isp_name", "bus0", "bus1", "carrier"] + + # Merge with existing links to get attributes like bus0, bus1, carrier + df_merged = pd.merge( + processed_costs, + existing_links_df.loc[:, attributes_to_carry], + left_on=id_column, + right_on=match_column, + ) + + # Directly modify df_merged to create the expansion links + df_merged["name"] = ( + df_merged["isp_name"] + "_exp_" + df_merged["investment_year"].astype(str) + ) + df_merged["p_nom"] = 0.0 + df_merged["p_nom_extendable"] = True + df_merged["p_min_pu"] = -1.0 + df_merged["build_year"] = df_merged["investment_year"] + df_merged["lifetime"] = np.inf + + # Keep only the columns needed for PyPSA links + expansion_cols = [ + "isp_name", + "name", + "bus0", + "bus1", + "carrier", + "p_nom", + "p_nom_extendable", + "p_min_pu", + "build_year", + "lifetime", + "capital_cost", + ] + expansion_links = df_merged[expansion_cols] + + return expansion_links + + +def _translate_time_varying_expansion_costs( + expansion_costs: pd.DataFrame, + cost_column_suffix: str, + investment_periods: list[int], + year_type: str, + wacc: float, + asset_lifetime: int, +) -> pd.DataFrame: + """Process time-varying expansion costs for flow paths and rezs. 
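A self-contained sketch of the reshaping and financial-year parsing this function performs. The cost values are invented; the annuity factor shown is the textbook formula and stands in for `_annuitised_investment_costs` from `helpers.py`, whose implementation is not part of this diff:

```python
import pandas as pd

# Invented wide-format expansion costs, one column per financial year.
expansion_costs = pd.DataFrame(
    {
        "flow_path": ["CNSW-NNSW"],
        "additional_network_capacity_mw": [500],
        "2025_26_$/mw": [1200.0],
        "2026_27_$/mw": [1250.0],
    }
)

# Wide to long: one row per (flow path, cost year).
melted = expansion_costs.melt(
    id_vars=["flow_path", "additional_network_capacity_mw"],
    var_name="cost_year_raw_with_suffix",
    value_name="cost_per_unit",
).dropna(subset=["cost_per_unit"])

# "2025_26_$/mw" refers to financial year 2025-26, i.e. model year 2026.
def parse_fy(column_name: str, suffix: str = "_$/mw") -> int:
    year_part = column_name.split(suffix)[0]    # "2025_26"
    return 2000 + int(year_part.split("_")[1])  # 2026

melted["investment_year"] = melted["cost_year_raw_with_suffix"].apply(parse_fy)

# Standard annuity factor as a stand-in for _annuitised_investment_costs.
wacc, lifetime = 0.06, 30
annuity_factor = wacc / (1 - (1 + wacc) ** -lifetime)
melted["capital_cost"] = melted["cost_per_unit"] * annuity_factor
print(melted[["flow_path", "investment_year", "capital_cost"]])
```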
+ + Converts from years as columns to years as rows, extracts model year from column + name, and annuitises expansion costs. + + Args: + expansion_costs: DataFrame containing expansion cost data with time-varying costs. + id_column: Name of the column that contains the component identifier. + cost_column_suffix: Suffix for cost columns (e.g. "_$/mw"). + investment_periods: List of investment years (e.g., [2025, 2030]). + year_type: Temporal configuration, e.g., "fy" or "calendar". + wacc: Weighted average cost of capital. + asset_lifetime: Nominal asset lifetime in years. + + Returns: + pd.DataFrame: Processed expansion costs with parsed years and annuitized costs. + """ + if expansion_costs.empty: + return pd.DataFrame() + + # Extract cost columns (those ending with the specified suffix) + cost_cols = [ + col for col in expansion_costs.columns if col.endswith(cost_column_suffix) + ] + id_vars = [col for col in expansion_costs.columns if col not in cost_cols] + + # Melt the dataframe to convert from wide to long format + df_melted = expansion_costs.melt( + id_vars=id_vars, + value_vars=cost_cols, + var_name="cost_year_raw_with_suffix", + value_name="cost_per_unit", + ) + + # Drop rows with NaN costs + df_melted = df_melted.dropna(subset=["cost_per_unit"]) + if df_melted.empty: + return pd.DataFrame() + + # Parse financial year from cost column names + def parse_cost_year(cost_year_raw: str) -> int: + year_part = cost_year_raw.split(cost_column_suffix)[0] # e.g., "2025_26" + if year_type == "fy": + # For financial year format like "2025_26" + yy_part = year_part.split("_")[1] # e.g., "26" + return 2000 + int(yy_part) # e.g., 2026, as per spec + elif year_type == "calendar": + raise NotImplementedError( + f"Calendar years are not implemented yet for transmission costs" + ) + else: + raise ValueError(f"Unknown year_type: {year_type}") + + df_melted["investment_year"] = df_melted["cost_year_raw_with_suffix"].apply( + parse_cost_year + ) + + # Filter to only include costs relevant to our investment periods + df_melted = df_melted[df_melted["investment_year"].isin(investment_periods)] + if df_melted.empty: + return pd.DataFrame() + + # Annuitize the costs + df_melted["capital_cost"] = df_melted["cost_per_unit"].apply( + lambda x: _annuitised_investment_costs(x, wacc, asset_lifetime) + ) + + return df_melted diff --git a/src/ispypsa/translator/mappings.py b/src/ispypsa/translator/mappings.py index 6ec8778..8709c09 100644 --- a/src/ispypsa/translator/mappings.py +++ b/src/ispypsa/translator/mappings.py @@ -6,49 +6,37 @@ _BUS_ATTRIBUTES = {"isp_sub_region_id": "name"} -_LINE_ATTRIBUTES = { - "flow_path_name": "name", +_LINK_ATTRIBUTES = { + "flow_path": "name", + "carrier": "carrier", "node_from": "bus0", "node_to": "bus1", - "forward_direction_mw_summer_typical": "s_nom", - "indicative_transmission_expansion_cost_$/mw": "capital_cost", - # TODO: implement reverse direction limit - # "reverse_direction_mw_summer_typical": "" + "forward_direction_mw_summer_typical": "p_nom", + "reverse_direction_mw_summer_typical": "p_nom_reverse", } -_REZ_LINE_ATTRIBUTES = { +_REZ_LINK_ATTRIBUTES = { "rez_id": "bus0", "isp_sub_region_id": "bus1", - "rez_transmission_network_limit_summer_typical": "s_nom", - "indicative_transmission_expansion_cost_$/mw": "capital_cost", + "carrier": "carrier", + "rez_transmission_network_limit_summer_typical": "p_nom", } _CUSTOM_CONSTRAINT_ATTRIBUTES = { "term_id": "variable_name", - "indicative_transmission_expansion_cost_$/mw": "capital_cost", + "name": "variable_name", + 
"isp_name": "constraint_name", + "flow_path": "constraint_name", + "additional_network_capacity_mw": "rhs", "constraint_id": "constraint_name", + "rez_constraint_id": "constraint_name", "summer_typical": "rhs", "term_type": "term_type", "coefficient": "coefficient", } -_CUSTOM_CONSTRAINT_EXPANSION_COSTS = [ - "rez_group_constraints_expansion_costs", - "rez_transmission_limit_constraints_expansion_costs", -] - -_CUSTOM_CONSTRAINT_RHS_TABLES = [ - "rez_group_constraints_rhs", - "rez_transmission_limit_constraints_rhs", -] - -_CUSTOM_CONSTRAINT_LHS_TABLES = [ - "rez_group_constraints_lhs", - "rez_transmission_limit_constraints_lhs", -] - _CUSTOM_CONSTRAINT_TERM_TYPE_TO_COMPONENT_TYPE = { - "line_flow": "Line", + "link_flow": "Link", "generator_capacity": "Generator", "generator_output": "Generator", "load_consumption": "Load", @@ -56,7 +44,7 @@ } _CUSTOM_CONSTRAINT_TERM_TYPE_TO_ATTRIBUTE_TYPE = { - "line_flow": "s", + "link_flow": "p", "generator_capacity": "p_nom", "generator_output": "p", "load_consumption": "p", diff --git a/src/ispypsa/translator/renewable_energy_zones.py b/src/ispypsa/translator/renewable_energy_zones.py index 02f4d5c..0d0dda1 100644 --- a/src/ispypsa/translator/renewable_energy_zones.py +++ b/src/ispypsa/translator/renewable_energy_zones.py @@ -1,52 +1,88 @@ -from pathlib import Path - +import numpy as np import pandas as pd -from ispypsa.translator.helpers import _annuitised_investment_costs -from ispypsa.translator.mappings import _REZ_LINE_ATTRIBUTES +from ispypsa.config import ModelConfig +from ispypsa.translator.links import _translate_expansion_costs_to_links +from ispypsa.translator.mappings import _REZ_LINK_ATTRIBUTES -def _translate_renewable_energy_zone_build_limits_to_flow_paths( +def _translate_renewable_energy_zone_build_limits_to_links( renewable_energy_zone_build_limits: pd.DataFrame, - expansion_on: bool, - wacc: float, - asset_lifetime: int, - rez_to_sub_region_transmission_default_limit: float, + rez_expansion_costs: pd.DataFrame, + config: ModelConfig, ) -> pd.DataFrame: """Process renewable energy zone build limit data to format aligned with PyPSA - inputs. + inputs, incorporating time-varying expansion costs. + + Args: + renewable_energy_zone_build_limits: `ISPyPSA` formatted pd.DataFrame detailing + Renewable Energy Zone transmission limits. + rez_expansion_costs: `ISPyPSA` formatted pd.DataFrame detailing Renewable Energy + Zone expansion costs by year. + config: ModelConfig object containing wacc, investment periods, etc. + + Returns: + `pd.DataFrame`: PyPSA style link attributes in tabular format. 
+ """ + # Create existing links from renewable energy zone build limits + existing_links = _translate_existing_rez_connections_to_links( + renewable_energy_zone_build_limits, + config.network.rez_to_sub_region_transmission_default_limit, + ) + + # Create expansion links from rez expansion costs if expansion is enabled + if config.network.rez_transmission_expansion and not rez_expansion_costs.empty: + expansion_links = _translate_expansion_costs_to_links( + expansion_costs=rez_expansion_costs, + existing_links_df=existing_links.copy(), + investment_periods=config.temporal.capacity_expansion.investment_periods, + year_type=config.temporal.year_type, + wacc=config.wacc, + asset_lifetime=config.network.annuitisation_lifetime, + id_column="rez_constraint_id", + match_column="bus0", + ) + # Combine existing and expansion links + all_links = pd.concat( + [existing_links, expansion_links], ignore_index=True, sort=False + ) + else: + all_links = existing_links + + return all_links + + +def _translate_existing_rez_connections_to_links( + renewable_energy_zone_build_limits: pd.DataFrame, + rez_to_sub_region_transmission_default_limit: float, +) -> pd.DataFrame: + """Process existing REZ connection limits to PyPSA links. Args: renewable_energy_zone_build_limits: `ISPyPSA` formatted pd.DataFrame detailing Renewable Energy Zone transmission limits. - wacc: float, as fraction, indicating the weighted average cost of capital for - transmission line investment, for the purposes of annuitising capital - costs. - asset_lifetime: int specifying the nominal asset lifetime in years or the - purposes of annuitising capital costs. rez_to_sub_region_transmission_default_limit: float specifying the transmission limit to use for rez to subregion connections when an explicit limit is not given in the inputs. Returns: - `pd.DataFrame`: PyPSA style line attributes in tabular format. + `pd.DataFrame`: PyPSA style link attributes in tabular format. 
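A toy illustration of the naming convention and default-limit fill implemented in the body below (REZ names and limits invented):

```python
import pandas as pd

rez = pd.DataFrame(
    {
        "bus0": ["Central-West Orana REZ", "New England REZ"],
        "bus1": ["CNSW", "NNSW"],
        # NaN means the limit is modelled through a custom constraint instead.
        "p_nom": [4500.0, float("nan")],
    }
)

rez["isp_name"] = rez["bus0"] + "-" + rez["bus1"]
rez["name"] = rez["isp_name"] + "_existing"

# Connections without an explicit limit get a very large default capacity
# rather than inf, which would make the optimisation infeasible.
default_limit = 1_000_000.0
rez["p_nom"] = rez["p_nom"].fillna(default_limit)
print(rez[["name", "p_nom"]])
```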
""" - lines = renewable_energy_zone_build_limits.loc[:, _REZ_LINE_ATTRIBUTES.keys()] - lines = lines.rename(columns=_REZ_LINE_ATTRIBUTES) - lines["name"] = lines["bus0"] + "-" + lines["bus1"] + links = renewable_energy_zone_build_limits.loc[:, _REZ_LINK_ATTRIBUTES.keys()] + links = links.rename(columns=_REZ_LINK_ATTRIBUTES) + links["isp_name"] = links["bus0"] + "-" + links["bus1"] + links["name"] = links["isp_name"] + "_existing" - # Lines without an explicit limit because their limits are modelled through - # custom constraints are given a very large capacity because using inf causes - # infeasibility - lines["s_nom"] = lines["s_nom"].fillna(rez_to_sub_region_transmission_default_limit) + # Links without an explicit limit because their limits are modelled through + # custom constraints are given a very large capacity + links["p_nom"] = links["p_nom"].fillna(rez_to_sub_region_transmission_default_limit) - lines["capital_cost"] = lines["capital_cost"].apply( - lambda x: _annuitised_investment_costs(x, wacc, asset_lifetime) - ) + links["p_min_pu"] = -1.0 + links["build_year"] = 0 + links["lifetime"] = np.inf + links["capital_cost"] = np.nan - # not extendable by default - lines["s_nom_extendable"] = False - # If a non-nan capital_cost is given then set to extendable - lines.loc[~lines["capital_cost"].isna(), "s_nom_extendable"] = expansion_on + # Not extendable for existing links + links["p_nom_extendable"] = False - return lines + return links diff --git a/src/ispypsa/translator/snapshots.py b/src/ispypsa/translator/snapshots.py index 366562e..5d5e594 100644 --- a/src/ispypsa/translator/snapshots.py +++ b/src/ispypsa/translator/snapshots.py @@ -207,7 +207,7 @@ def _create_investment_period_weightings( objective function in each investment period. """ # Add model_end_year to calculate final period length - all_years = investment_periods + [model_end_year] + all_years = investment_periods + [model_end_year + 1] # Calculate period lengths investment_period_lengths = np.diff(all_years).astype("int64") diff --git a/tests/conftest.py b/tests/conftest.py index 1ff7370..b546fe0 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,8 +1,174 @@ +import io from pathlib import Path +import pandas as pd import pytest @pytest.fixture(scope="module") def workbook_table_cache_test_path(): return Path("tests", "test_workbook_table_cache") + + +@pytest.fixture +def csv_str_to_df(): + def func(csv_str, **kwargs): + """Helper function to convert a CSV string to a DataFrame.""" + # Remove spaces and tabs that have been included for readability. + csv_str = csv_str.replace(" ", "").replace("\t", "").replace("__", " ") + return pd.read_csv(io.StringIO(csv_str), **kwargs) + + return func + + +@pytest.fixture +def sample_ispypsa_tables(csv_str_to_df): + """ + Fixture that returns a dictionary of dataframes needed to run create_pypsa_friendly_inputs. + + This example set has 2 subregions (CNSW and NNSW) each with one REZ. + All tables from _BASE_TEMPLATE_OUTPUTS are included with appropriate columns and sample data. 
+ """ + tables = {} + + # Sub-regions table + sub_regions_csv = """ + isp_sub_region_id, nem_region_id, sub_region_reference_node, sub_region_reference_node_voltage_kv, substation_latitude, substation_longitude + CNSW, NSW1, Sydney__West__330__kV, 330, -33.8, 150.8 + NNSW, NSW1, Tamworth__330__kV, 330, -31.1, 150.9 + """ + tables["sub_regions"] = csv_str_to_df(sub_regions_csv) + + # NEM regions table + nem_regions_csv = """ + NEM__Region, Regional__Reference__Node, ISP__Sub-region + NSW1, Sydney__West__330__kV, Central__NSW__(CNSW) + """ + tables["nem_regions"] = csv_str_to_df(nem_regions_csv) + + # Renewable energy zones table + renewable_energy_zones_csv = """ + rez_id, isp_sub_region_id, carrier, wind_generation_total_limits_mw_high, wind_generation_total_limits_mw_medium, wind_generation_total_limits_mw_offshore_floating, wind_generation_total_limits_mw_offshore_fixed, solar_pv_plus_solar_thermal_limits_mw_solar, rez_solar_resource_limit_violation_penalty_factor_$/mw, rez_transmission_network_limit_summer_typical + Central-West__Orana__REZ, CNSW, AC, 3000, 2000, 0, 0, 1500, 10000, 4500 + New__England__REZ, NNSW, AC, 5000, 3500, 0, 0, 2000, 10000, 6000 + """ + tables["renewable_energy_zones"] = csv_str_to_df(renewable_energy_zones_csv) + + # Flow paths table + flow_paths_csv = """ + flow_path, carrier, node_from, node_to, forward_direction_mw_summer_typical, reverse_direction_mw_summer_typical + CNSW-NNSW, AC, CNSW, NNSW, 1000, 1000 + """ + tables["flow_paths"] = csv_str_to_df(flow_paths_csv) + + # ECAA generators table + ecaa_generators_csv = """ + generator, sub_region_id, region_id, fuel_type, maximum_capacity_mw + Bayswater, CNSW, NSW1, Black__Coal, 2640 + Eraring, CNSW, NSW1, Black__Coal, 2880 + Central-West__Orana__REZ__Wind, CNSW, NSW1, Wind, 0 + Central-West__Orana__REZ__Solar, CNSW, NSW1, Solar, 0 + New__England__REZ__Wind, NNSW, NSW1, Wind, 0 + New__England__REZ__Solar, NNSW, NSW1, Solar, 0 + """ + tables["ecaa_generators"] = csv_str_to_df(ecaa_generators_csv) + + # REZ group constraints LHS table + custom_constraints_lhs = """ + constraint_id, term_type, variable_name, coefficient + Central-West__Orana__REZ_Custom, generator_capacity, Central-West__Orana__REZ__Wind, 1.0 + Central-West__Orana__REZ_Custom, generator_capacity, Central-West__Orana__REZ__Solar, 1.0 + New__England__REZ_Custom, generator_capacity, New__England__REZ__Wind, 1.0 + New__England__REZ_Custom, generator_capacity, New__England__REZ__Solar, 1.0 + """ + tables["custom_constraints_lhs"] = csv_str_to_df(custom_constraints_lhs) + + # REZ group constraints RHS table + custom_constraints_rhs = """ + constraint_id, summer_typical + Central-West__Orana__REZ_Custom, 4500 + New__England__REZ_Custom, 6000 + """ + tables["custom_constraints_rhs"] = csv_str_to_df(custom_constraints_rhs) + + # Optional tables that might be needed for transmission expansion + flow_path_expansion_costs_csv = """ + flow_path, additional_network_capacity_mw, 2025_26_$/mw, 2026_27_$/mw, 2027_28_$/mw + CNSW-NNSW, 500, 1200, 1250, 1300 + """ + tables["flow_path_expansion_costs"] = csv_str_to_df(flow_path_expansion_costs_csv) + + rez_transmission_expansion_costs_csv = """ + rez_constraint_id, additional_network_capacity_mw, 2025_26_$/mw, 2026_27_$/mw, 2027_28_$/mw + Central-West__Orana__REZ, 1000, 2000, 2100, 2200 + New__England__REZ, 1500, 2500, 2600, 2700 + Central-West__Orana__REZ_Custom, 1000, 2000, 2100, 2200 + New__England__REZ_Custom, 1500, 2500, 2600, 2700 + """ + tables["rez_transmission_expansion_costs"] = csv_str_to_df( + 
rez_transmission_expansion_costs_csv
+    )
+
+    return tables
+
+
+@pytest.fixture
+def sample_model_config():
+    """
+    Fixture that returns a sample ModelConfig for testing.
+
+    This config uses:
+    - Sub-regional granularity with discrete REZ nodes
+    - Financial year type
+    - 2 investment periods (2026, 2028)
+    - Transmission and REZ transmission expansion enabled
+    - Unserved energy enabled
+    """
+    from ispypsa.config import ModelConfig
+    from ispypsa.config.validators import (
+        NetworkConfig,
+        NodesConfig,
+        TemporalAggregationConfig,
+        TemporalCapacityInvestmentConfig,
+        TemporalConfig,
+        TemporalOperationalConfig,
+        TemporalRangeConfig,
+        UnservedEnergyConfig,
+    )
+
+    return ModelConfig(
+        ispypsa_run_name="test_run",
+        scenario="Step Change",
+        wacc=0.06,
+        discount_rate=0.05,
+        iasr_workbook_version="6.0",
+        solver="highs",
+        temporal=TemporalConfig(
+            path_to_parsed_traces="NOT_SET_FOR_TESTING",
+            year_type="fy",
+            range=TemporalRangeConfig(start_year=2026, end_year=2028),
+            capacity_expansion=TemporalCapacityInvestmentConfig(
+                reference_year_cycle=[2024],
+                resolution_min=30,
+                aggregation=TemporalAggregationConfig(representative_weeks=None),
+                investment_periods=[2026, 2028],
+            ),
+            operational=TemporalOperationalConfig(
+                reference_year_cycle=[2024],
+                resolution_min=30,
+                aggregation=TemporalAggregationConfig(representative_weeks=None),
+                horizon=24,
+                overlap=0,
+            ),
+        ),
+        network=NetworkConfig(
+            nodes=NodesConfig(
+                regional_granularity="sub_regions", rezs="discrete_nodes"
+            ),
+            annuitisation_lifetime=25,
+            transmission_expansion=True,
+            rez_transmission_expansion=True,
+            rez_to_sub_region_transmission_default_limit=1000000.0,
+        ),
+        unserved_energy=UnservedEnergyConfig(cost=10000, generator_size_mw=10000),
+    )
diff --git a/tests/test_config/test_pydantic_model_config.py b/tests/test_config/test_pydantic_model_config.py
index 6c973e4..6e006b1 100644
--- a/tests/test_config/test_pydantic_model_config.py
+++ b/tests/test_config/test_pydantic_model_config.py
@@ -14,7 +14,11 @@
 @pytest.mark.parametrize("year_type", ["fy", "calendar"])
 @pytest.mark.parametrize("representative_weeks", [None, [0], [12, 20]])
 def test_valid_config(
-    scenario, regional_granularity, nodes_rezs, year_type, representative_weeks
+    scenario,
+    regional_granularity,
+    nodes_rezs,
+    year_type,
+    representative_weeks,
 ):
     config = get_valid_config()
@@ -46,7 +50,9 @@ def get_valid_config():
         "discount_rate": 0.05,
         "network": {
             "transmission_expansion": True,
+            "transmission_expansion_limit_override": None,
             "rez_transmission_expansion": True,
+            "rez_connection_expansion_limit_override": None,
             "annuitisation_lifetime": 30,
             "nodes": {
                 "regional_granularity": "sub_regions",
diff --git a/tests/test_model/test_custom_constraints.py b/tests/test_model/test_custom_constraints.py
index 05a8d4c..9be6cda 100644
--- a/tests/test_model/test_custom_constraints.py
+++ b/tests/test_model/test_custom_constraints.py
@@ -5,6 +5,7 @@
 from ispypsa.data_fetch import read_csvs
 from ispypsa.model import build_pypsa_network
+from ispypsa.model.custom_constraints import _add_custom_constraints
 
 
 def test_custom_constraints():
@@ -39,3 +40,316 @@ def test_custom_constraints():
     network.optimize.solve_model()
 
     assert network.generators.loc["con_one-EXPANSION", "p_nom_opt"] == 1500.0
+
+
+def test_custom_constraints_greater_equal(csv_str_to_df):
+    """Test custom constraints with >= constraint type for Generator p_nom variables."""
+    import pypsa
+
+    # Create a simple network
+    network = pypsa.Network()
+    
network.set_snapshots(pd.date_range("2025-01-01", periods=4, freq="h"))
+
+    # Add buses
+    network.add("Bus", "bus1")
+    network.add("Bus", "bus2")
+
+    # Add generators with extendable capacity
+    network.add(
+        "Generator",
+        "gen1",
+        bus="bus1",
+        p_nom_extendable=True,
+        capital_cost=100,
+        marginal_cost=50,
+    )
+    network.add(
+        "Generator",
+        "gen2",
+        bus="bus2",
+        p_nom_extendable=True,
+        capital_cost=150,
+        marginal_cost=40,
+    )
+
+    # Add loads
+    network.loads_t.p_set = pd.DataFrame(
+        {"load1": [100, 150, 200, 250]}, index=network.snapshots
+    )
+    network.add("Load", "load1", bus="bus1")
+
+    # Define custom constraints using csv_str_to_df
+    custom_constraints_rhs_csv = """
+    constraint_name, rhs, constraint_type
+    min_gen_capacity, 300, >=
+    """
+
+    custom_constraints_lhs_csv = """
+    constraint_name, component, attribute, variable_name, coefficient
+    min_gen_capacity, Generator, p_nom, gen1, 1.0
+    min_gen_capacity, Generator, p_nom, gen2, 1.0
+    """
+
+    custom_constraints_rhs = csv_str_to_df(custom_constraints_rhs_csv)
+    custom_constraints_lhs = csv_str_to_df(custom_constraints_lhs_csv)
+
+    # Apply custom constraints and solve
+    network.optimize.create_model()
+    _add_custom_constraints(network, custom_constraints_rhs, custom_constraints_lhs)
+    network.optimize.solve_model()
+
+    # Check that the sum of generator capacities is at least 300
+    total_capacity = network.generators.p_nom_opt.sum()
+    assert total_capacity >= 300.0
+
+
+def test_custom_constraints_equal_link_p_nom(csv_str_to_df):
+    """Test custom constraints with == constraint type for Link p_nom variables."""
+    import pypsa
+
+    # Create a simple network
+    network = pypsa.Network()
+    network.set_snapshots(pd.date_range("2025-01-01", periods=4, freq="h"))
+
+    # Add buses
+    network.add("Bus", "bus1")
+    network.add("Bus", "bus2")
+    network.add("Bus", "bus3")
+
+    # Add links with extendable capacity
+    network.add(
+        "Link",
+        "link1",
+        bus0="bus1",
+        bus1="bus2",
+        p_nom_extendable=True,
+        capital_cost=1000,
+        efficiency=0.95,
+    )
+    network.add(
+        "Link",
+        "link2",
+        bus0="bus2",
+        bus1="bus3",
+        p_nom_extendable=True,
+        capital_cost=1200,
+        efficiency=0.95,
+    )
+
+    # Add generators and loads
+    network.add("Generator", "gen1", bus="bus1", p_nom=500, marginal_cost=30)
+    network.loads_t.p_set = pd.DataFrame(
+        {"load2": [50, 100, 150, 200], "load3": [100, 150, 200, 250]},
+        index=network.snapshots,
+    )
+    network.add("Load", "load2", bus="bus2")
+    network.add("Load", "load3", bus="bus3")
+
+    # Define custom constraints - link capacities must be equal
+    custom_constraints_rhs_csv = """
+    constraint_name, rhs, constraint_type
+    equal_link_capacity, 0, ==
+    """
+
+    custom_constraints_lhs_csv = """
+    constraint_name, component, attribute, variable_name, coefficient
+    equal_link_capacity, Link, p_nom, link1, 1.0
+    equal_link_capacity, Link, p_nom, link2, -1.0
+    """
+
+    custom_constraints_rhs = csv_str_to_df(custom_constraints_rhs_csv)
+    custom_constraints_lhs = csv_str_to_df(custom_constraints_lhs_csv)
+
+    # Apply custom constraints and solve
+    network.optimize.create_model()
+    _add_custom_constraints(network, custom_constraints_rhs, custom_constraints_lhs)
+    network.optimize.solve_model()
+
+    # Check that link capacities are equal
+    assert (
+        abs(
+            network.links.loc["link1", "p_nom_opt"]
+            - network.links.loc["link2", "p_nom_opt"]
+        )
+        < 0.01
+    )
+
+
+def test_custom_constraints_mixed_components(csv_str_to_df):
+    """Test custom constraints with mixed component types (Generator p and Link p) with <= constraint."""
+    import pypsa
+
+    # 
Create a simple network + network = pypsa.Network() + network.set_snapshots(pd.date_range("2025-01-01", periods=4, freq="h")) + + # Add carriers + network.add("Carrier", "AC") + network.add("Carrier", "gas") + + # Add buses + network.add("Bus", "bus1") + network.add("Bus", "bus2") + + # Add generators + network.add( + "Generator", "gen1", bus="bus1", p_nom=200, marginal_cost=1, carrier="gas" + ) + network.add( + "Generator", "gen2", bus="bus2", p_nom=150, marginal_cost=35, carrier="gas" + ) + + # Add link + network.add( + "Link", + "link1", + bus0="bus1", + bus1="bus2", + p_nom=200, + efficiency=0.95, + carrier="AC", + ) + + # Add loads + network.loads_t.p_set = pd.DataFrame( + { + "load1": [10, 10, 10, 10], + "load2": [200, 200, 200, 200], + }, + index=network.snapshots, + ) + network.add("Load", "load1", bus="bus1") + network.add("Load", "load2", bus="bus2") + + # Define custom constraints - sum of gen1 and link1 power at each timestep <= 150 + custom_constraints_rhs_csv = """ + constraint_name, rhs, constraint_type + max_combined_power, 150, <= + """ + + custom_constraints_lhs_csv = """ + constraint_name, component, attribute, variable_name, coefficient + max_combined_power, Generator, p, gen1, 1.0 + max_combined_power, Link, p, link1, 1.0 + """ + + custom_constraints_rhs = csv_str_to_df(custom_constraints_rhs_csv) + custom_constraints_lhs = csv_str_to_df(custom_constraints_lhs_csv) + + # Apply custom constraints and solve + network.optimize.create_model() + _add_custom_constraints(network, custom_constraints_rhs, custom_constraints_lhs) + network.optimize.solve_model() + + # Check that combined power does not exceed 150 at any timestep + gen_power = network.generators_t.p["gen1"] + link_power = network.links_t.p0["link1"] + combined_power = gen_power + link_power + assert combined_power.max() <= 150.01 # Small tolerance for numerical precision + assert combined_power.max() >= 149.9 # Small tolerance for numerical precision + + +def test_custom_constraints_multiple_types(csv_str_to_df): + """Test multiple custom constraints with different constraint types.""" + import pypsa + + # Create a simple network + network = pypsa.Network() + network.set_snapshots(pd.date_range("2025-01-01", periods=4, freq="h")) + + # Add buses + network.add("Bus", "bus1") + network.add("Bus", "bus2") + network.add("Bus", "bus3") + + # Add generators with extendable capacity + network.add( + "Generator", + "gen1", + bus="bus1", + p_nom_extendable=True, + capital_cost=100, + marginal_cost=50, + ) + network.add( + "Generator", + "gen2", + bus="bus2", + p_nom_extendable=True, + capital_cost=120, + marginal_cost=45, + ) + + # Add links with extendable capacity + network.add( + "Link", + "link1", + bus0="bus1", + bus1="bus3", + p_nom_extendable=True, + capital_cost=1000, + efficiency=0.95, + ) + network.add( + "Link", + "link2", + bus0="bus2", + bus1="bus3", + p_nom_extendable=True, + capital_cost=1100, + efficiency=0.95, + ) + + # Add loads + network.loads_t.p_set = pd.DataFrame( + { + "load1": [100, 150, 200, 250], + "load2": [80, 120, 160, 200], + "load3": [150, 200, 200, 200], + }, + index=network.snapshots, + ) + network.add("Load", "load1", bus="bus1") + network.add("Load", "load2", bus="bus2") + network.add("Load", "load3", bus="bus3") + + # Define multiple custom constraints with different types + custom_constraints_rhs_csv = """ + constraint_name, rhs, constraint_type + min_total_gen, 400, >= + max_link_sum, 300, <= + gen_ratio, 0, == + """ + + custom_constraints_lhs_csv = """ + constraint_name, component, 
attribute, variable_name, coefficient
+    min_total_gen, Generator, p_nom, gen1, 1.0
+    min_total_gen, Generator, p_nom, gen2, 1.0
+    max_link_sum, Link, p_nom, link1, 1.0
+    max_link_sum, Link, p_nom, link2, 1.0
+    gen_ratio, Generator, p_nom, gen1, 1.0
+    gen_ratio, Generator, p_nom, gen2, -1.2
+    """
+
+    custom_constraints_rhs = csv_str_to_df(custom_constraints_rhs_csv)
+    custom_constraints_lhs = csv_str_to_df(custom_constraints_lhs_csv)
+
+    # Apply custom constraints and solve
+    network.optimize.create_model()
+    _add_custom_constraints(network, custom_constraints_rhs, custom_constraints_lhs)
+    network.optimize.solve_model()
+
+    # Check all constraints are satisfied
+    # 1. Minimum total generation capacity >= 400
+    total_gen = network.generators.p_nom_opt.sum()
+    assert total_gen >= 399.99  # Small tolerance
+
+    # 2. Maximum link capacity sum <= 300
+    total_link = network.links.p_nom_opt.sum()
+    assert total_link <= 300.01  # Small tolerance
+
+    # 3. Generator ratio: gen1 = 1.2 * gen2
+    gen1_cap = network.generators.loc["gen1", "p_nom_opt"]
+    gen2_cap = network.generators.loc["gen2", "p_nom_opt"]
+    assert abs(gen1_cap - 1.2 * gen2_cap) < 0.01  # Small tolerance
diff --git a/tests/test_model/test_pypsa_friendly_inputs/test_custom_constraints/custom_constraints_lhs.csv b/tests/test_model/test_pypsa_friendly_inputs/test_custom_constraints/custom_constraints_lhs.csv
index 2a8f2ae..1207bea 100644
--- a/tests/test_model/test_pypsa_friendly_inputs/test_custom_constraints/custom_constraints_lhs.csv
+++ b/tests/test_model/test_pypsa_friendly_inputs/test_custom_constraints/custom_constraints_lhs.csv
@@ -1,4 +1,4 @@
 ,constraint_name,component,attribute,variable_name,coefficient
-0,con_one,Line,s,line_one,1.0
+0,con_one,Link,p,link_one,1.0
 1,con_one,Generator,p,Bayswater,1.0
 2,con_one,Generator,p_nom,con_one-EXPANSION,-1.0
diff --git a/tests/test_model/test_pypsa_friendly_inputs/test_custom_constraints/custom_constraints_rhs.csv b/tests/test_model/test_pypsa_friendly_inputs/test_custom_constraints/custom_constraints_rhs.csv
index 459fc5d..3311c96 100644
--- a/tests/test_model/test_pypsa_friendly_inputs/test_custom_constraints/custom_constraints_rhs.csv
+++ b/tests/test_model/test_pypsa_friendly_inputs/test_custom_constraints/custom_constraints_rhs.csv
@@ -1,2 +1,2 @@
-,constraint_name,rhs
-0,con_one,500
+,constraint_name,rhs,constraint_type
+0,con_one,500,<=
diff --git a/tests/test_model/test_pypsa_friendly_inputs/test_custom_constraints/lines.csv b/tests/test_model/test_pypsa_friendly_inputs/test_custom_constraints/lines.csv
deleted file mode 100644
index 786a319..0000000
--- a/tests/test_model/test_pypsa_friendly_inputs/test_custom_constraints/lines.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-name,bus0,bus1,s_nom,s_nom_extendable,capital_cost
-line_one,bus_one,bus_two,1000.0,False,0.0
diff --git a/tests/test_model/test_pypsa_friendly_inputs/test_custom_constraints/links.csv b/tests/test_model/test_pypsa_friendly_inputs/test_custom_constraints/links.csv
new file mode 100644
index 0000000..411bc77
--- /dev/null
+++ b/tests/test_model/test_pypsa_friendly_inputs/test_custom_constraints/links.csv
@@ -0,0 +1,2 @@
+name,bus0,bus1,p_nom,p_nom_extendable,capital_cost
+link_one,bus_one,bus_two,1000.0,False,0.0
diff --git a/tests/test_model/test_updating_time_series.py b/tests/test_model/test_updating_time_series.py
index 32815df..9554f6c 100644
--- a/tests/test_model/test_updating_time_series.py
+++ b/tests/test_model/test_updating_time_series.py
@@ -112,8 +112,6 @@ def 
test_expand_then_operate(): } ) - lines = pd.DataFrame() # Empty for one node model - investment_period_weights = pd.DataFrame( { "period": [2025, 2026], @@ -132,7 +130,6 @@ def test_expand_then_operate(): "snapshots": capacity_expansion_snapshots, "buses": buses, "generators": generators, - "lines": lines, "investment_period_weights": investment_period_weights, "custom_constraints_lhs": custom_constraints_lhs, "custom_constraints_rhs": custom_constraints_rhs, diff --git a/tests/test_templater/test_flow_path_costs.py b/tests/test_templater/test_flow_path_costs.py new file mode 100644 index 0000000..176b23a --- /dev/null +++ b/tests/test_templater/test_flow_path_costs.py @@ -0,0 +1,555 @@ +import numpy as np +import pandas as pd +import pytest + +from ispypsa.templater.flow_paths import ( + _find_first_year_with_complete_costs, + _get_actionable_projects_table, + _get_augmentation_table, + _get_cost_table, + _get_least_cost_options, + _get_prep_activities_table, + _template_sub_regional_flow_path_costs, +) +from ispypsa.templater.mappings import _FLOW_PATH_CONFIG, _REZ_CONFIG + + +def test_template_sub_regional_flow_path_costs_simple_least_cost_option(): + # Augmentation tables for NNSW-SQ and TAS-VIC + aug_table_nnsw_sq = pd.DataFrame( + { + "Flow path": ["NNSW-SQ", "NNSW-SQ", "NNSW-SQ"], + "Option Name": ["NNSW-SQ Option 1", "NNSW-SQ Option 2", "NNSW–SQ Option 5"], + "forward_capacity_increase": [100, 200, 40], + "reverse_capacity_increase": [90, 140, 50], + } + ) + aug_table_tas_vic = pd.DataFrame( + { + "Flow path": ["TAS-VIC", "TAS-VIC"], + "Option Name": [ + "TAS-VIC Option 1 (Project Marinus Stage 1)", + "TAS-VIC Option 2 (Project Marinus Stage 2)", + ], + "forward_capacity_increase": [130, 70], + "reverse_capacity_increase": [150, 65], + } + ) + # Cost tables for NNSW-SQ and TAS-VIC + # Option 2 is least cost and has the largest increase so should be chosen. + cost_table_nnsw_sq = pd.DataFrame( + { + "Flow path": ["NNSW-SQ", "NNSW-SQ"], + "Option Name": ["NNSW-SQ Option 1", "NNSW-SQ Option 2"], + "2024_25": [50, 40], + "2025_26": [55, 45], + } + ) + # Option 1 is least cost and has the largest increase so should be chosen. + cost_table_tas_vic = pd.DataFrame( + { + "Flow path": ["TAS-VIC", "TAS-VIC"], + "Option Name": [ + "TAS-VIC Option 1 (Project Marinus Stage 1)", + "TAS-VIC Option 2 (Project Marinus Stage 2)", + ], + "2024_25": [70, np.nan], # actionable ISP option has NaN + "2025_26": [75, np.nan], + } + ) + # Preparatory activities and actionable ISP tables (should not be chosen) + # Note: ISPyPSA contains internal mappings which match the names used in Preparatory + # and actionable isp cost tables to the names used in the augmentation tables. 
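+    # The costs given to these two tables below are deliberately higher than the standard
+    # option costs above, so the options they map to should not win the least-cost comparison.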
+ prep_acts = pd.DataFrame( + { + "Flow path": ["500kV QNI Connect (NSW works)"], + "2024_25": [100], + "2025_26": [110], + } + ) + actionable_isp = pd.DataFrame( + { + "Flow path": ["Project Marinus Stage 1"], + "2024_25": [999], + "2025_26": [999], + } + ) + # Compose iasr_tables dict + iasr_tables = { + "flow_path_augmentation_options_NNSW-SQ": aug_table_nnsw_sq, + "flow_path_augmentation_options_TAS-VIC": aug_table_tas_vic, + "flow_path_augmentation_costs_progressive_change_NNSW-SQ": cost_table_nnsw_sq, + "flow_path_augmentation_costs_progressive_change_TAS-VIC": cost_table_tas_vic, + "flow_path_augmentation_costs_progressive_change_preparatory_activities": prep_acts, + "flow_path_augmentation_costs_progressive_change_actionable_isp_projects": actionable_isp, + } + scenario = "Progressive Change" + # Run function + result = _template_sub_regional_flow_path_costs(iasr_tables, scenario) + # Check least cost options are chosen for NNSW-SQ and TAS-VIC + nnsw_sq_row = result[result["flow_path"] == "NNSW-SQ"] + tas_vic_row = result[result["flow_path"] == "TAS-VIC"] + assert nnsw_sq_row["option"].iloc[0] == "NNSW-SQ Option 2" + assert tas_vic_row["option"].iloc[0] == "TAS-VIC Option 1 (Project Marinus Stage 1)" + # Check nominal_flow_limit_increase_mw is correct + assert nnsw_sq_row["additional_network_capacity_mw"].iloc[0] == 200 + assert tas_vic_row["additional_network_capacity_mw"].iloc[0] == 150 + # Check cost per year column is correct (cost divided by nominal limit) + # For NNSW-SQ Option 2: 2024_25 = 40/200 = 0.2, 2025_26 = 45/200 = 0.225 + # For TAS-VIC Option 1: 2024_25 = 70/150 ≈ 0.4667, 2025_26 = 75/150 = 0.5 + assert abs(nnsw_sq_row["2024_25_$/mw"].iloc[0] - 0.2) < 1e-6 + assert abs(nnsw_sq_row["2025_26_$/mw"].iloc[0] - 0.225) < 1e-6 + assert abs(tas_vic_row["2024_25_$/mw"].iloc[0] - (70 / 150)) < 1e-6 + assert abs(tas_vic_row["2025_26_$/mw"].iloc[0] - 0.5) < 1e-6 + + +def test_template_sub_regional_flow_path_costs_prep_and_actionable_chosen(): + """ + The cost of the non preparatory activities and non actionable isp projects + have been made very high and therefore preparatory activities and + actionable isp projects should be chosen. + """ + # Augmentation tables for NNSW-SQ and TAS-VIC + aug_table_nnsw_sq = pd.DataFrame( + { + "Flow path": ["NNSW-SQ", "NNSW-SQ", "NNSW-SQ"], + "Option Name": ["NNSW-SQ Option 1", "NNSW-SQ Option 2", "NNSW–SQ Option 5"], + "forward_capacity_increase": [100, 150, 200], + "reverse_capacity_increase": [100, 150, 150], + } + ) + aug_table_tas_vic = pd.DataFrame( + { + "Flow path": ["TAS-VIC", "TAS-VIC"], + "Option Name": [ + "TAS-VIC Option 1 (Project Marinus Stage 1)", + "TAS-VIC Option 2 (Project Marinus Stage 2)", + ], + "forward_capacity_increase": [140, 150], + "reverse_capacity_increase": [145, 130], + } + ) + # Standard cost tables (set high or NaN) + cost_table_nnsw_sq = pd.DataFrame( + { + "Flow path": ["NNSW-SQ", "NNSW-SQ"], + "Option Name": ["NNSW-SQ Option 1", "NNSW-SQ Option 2"], + "2024_25": [1000, 1000], + "2025_26": [1000, 1000], + } + ) + cost_table_tas_vic = pd.DataFrame( + { + "Flow path": ["TAS-VIC", "TAS-VIC"], + "Option Name": [ + "TAS-VIC Option 1 (Project Marinus Stage 1)", + "TAS-VIC Option 2 (Project Marinus Stage 2)", + ], + "2024_25": [1000, np.nan], + "2025_26": [1000, np.nan], + } + ) + # Preparatory activities and actionable ISP tables (set low cost) + # Note: ISPyPSA contains internal mappings which match the names used in Preparatory + # and actionable isp cost tables to the names used in the augmentation tables. 
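+    # Based on the assertions further below, "500kV QNI Connect (NSW works)" is assumed to map
+    # to "NNSW–SQ Option 5" and "Project Marinus Stage 2" to "TAS-VIC Option 2", so with the
+    # low costs given here those options should be selected.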
+    prep_acts = pd.DataFrame(
+        {
+            "Flow path": ["500kV QNI Connect (NSW works)"],
+            "2024-25": [10],
+            "2025-26": [20],
+        }
+    )
+    actionable_isp = pd.DataFrame(
+        {
+            "Flow path": ["Project Marinus Stage 2"],
+            "2024-25": [15],
+            "2025-26": [25],
+        }
+    )
+    # Compose iasr_tables dict
+    iasr_tables = {
+        "flow_path_augmentation_options_NNSW-SQ": aug_table_nnsw_sq,
+        "flow_path_augmentation_options_TAS-VIC": aug_table_tas_vic,
+        "flow_path_augmentation_costs_progressive_change_NNSW-SQ": cost_table_nnsw_sq,
+        "flow_path_augmentation_costs_progressive_change_TAS-VIC": cost_table_tas_vic,
+        "flow_path_augmentation_costs_progressive_change_preparatory_activities": prep_acts,
+        "flow_path_augmentation_costs_progressive_change_actionable_isp_projects": actionable_isp,
+    }
+    scenario = "Progressive Change"
+    # Run function
+    result = _template_sub_regional_flow_path_costs(iasr_tables, scenario)
+    # Check that the prep activity is chosen for NNSW-SQ and actionable ISP for TAS-VIC
+    nnsw_sq_row = result[result["flow_path"] == "NNSW-SQ"]
+    tas_vic_row = result[result["flow_path"] == "TAS-VIC"]
+    assert nnsw_sq_row["option"].iloc[0] == "NNSW–SQ Option 5"
+    assert tas_vic_row["option"].iloc[0] == "TAS-VIC Option 2 (Project Marinus Stage 2)"
+    # Check nominal_flow_limit_increase_mw is correct
+    assert nnsw_sq_row["additional_network_capacity_mw"].iloc[0] == 200
+    assert tas_vic_row["additional_network_capacity_mw"].iloc[0] == 150
+    # Check cost per year column is correct (cost divided by nominal limit)
+    assert abs(nnsw_sq_row["2024_25_$/mw"].iloc[0] - (10 / 200)) < 1e-6
+    assert abs(nnsw_sq_row["2025_26_$/mw"].iloc[0] - (20 / 200)) < 1e-6
+    assert abs(tas_vic_row["2024_25_$/mw"].iloc[0] - (15 / 150)) < 1e-6
+    assert abs(tas_vic_row["2025_26_$/mw"].iloc[0] - (25 / 150)) < 1e-6
+
+
+def test_template_sub_regional_flow_path_costs_use_first_year_with_valid_costs():
+    """
+    Test that the first year with non-nan cost data for all options is used.
+    """
+    # NNSW-SQ: only 2025_26 has all non-nan costs
+    aug_table_nnsw_sq = pd.DataFrame(
+        {
+            "Flow path": ["NNSW-SQ", "NNSW-SQ", "NNSW-SQ"],
+            "Option Name": ["NNSW-SQ Option 1", "NNSW-SQ Option 2", "NNSW-SQ Option 3"],
+            "forward_capacity_increase": [150, 200, 200],
+            "reverse_capacity_increase": [200, 150, 150],
+        }
+    )
+    # Even though option 3 is cheaper than option 2 in 2024_25, option 1 should get
+    # chosen because 2025_26 is used as the comparison year and it has the lowest cost there.
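+    # (2024_25 cannot be the comparison year because Option 1 has a NaN cost in that year,
+    # so 2025_26 is the first year with complete costs for NNSW-SQ.)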
+ cost_table_nnsw_sq = pd.DataFrame( + { + "Flow path": ["NNSW-SQ", "NNSW-SQ", "NNSW-SQ"], + "Option Name": ["NNSW-SQ Option 1", "NNSW-SQ Option 2", "NNSW-SQ Option 3"], + "2024_25": [np.nan, 50, 10], + "2025_26": [35, 45, 50], + } + ) + # TAS-VIC: all years have valid costs + aug_table_tas_vic = pd.DataFrame( + { + "Flow path": ["TAS-VIC", "TAS-VIC"], + "Option Name": ["TAS-VIC Option 1", "TAS-VIC Option 2"], + "forward_capacity_increase": [90, 100], + "reverse_capacity_increase": [100, 90], + } + ) + cost_table_tas_vic = pd.DataFrame( + { + "Flow path": ["TAS-VIC", "TAS-VIC"], + "Option Name": ["TAS-VIC Option 1", "TAS-VIC Option 2"], + "2024_25": [100, 10], + "2025_26": [10, 100], + } + ) + iasr_tables = { + "flow_path_augmentation_options_NNSW-SQ": aug_table_nnsw_sq, + "flow_path_augmentation_options_TAS-VIC": aug_table_tas_vic, + "flow_path_augmentation_costs_progressive_change_NNSW-SQ": cost_table_nnsw_sq, + "flow_path_augmentation_costs_progressive_change_TAS-VIC": cost_table_tas_vic, + } + scenario = "Progressive Change" + result = _template_sub_regional_flow_path_costs(iasr_tables, scenario) + # NNSW-SQ: Only 2025_26 has all non-nan costs, so selection is based on that year for all years + nnsw_sq_row = result[result["flow_path"] == "NNSW-SQ"] + assert nnsw_sq_row["option"].iloc[0] == "NNSW-SQ Option 1" + assert nnsw_sq_row["additional_network_capacity_mw"].iloc[0] == 200 + assert np.isnan(nnsw_sq_row["2024_25_$/mw"].iloc[0]) + assert abs(nnsw_sq_row["2025_26_$/mw"].iloc[0] - (35 / 200)) < 1e-6 + # TAS-VIC: both years valid, Option 2 is the least cost only in first, + # but should be chosen on this basis. + tas_vic_row = result[result["flow_path"] == "TAS-VIC"] + assert tas_vic_row["option"].iloc[0] == "TAS-VIC Option 2" + assert tas_vic_row["additional_network_capacity_mw"].iloc[0] == 100 + assert abs(tas_vic_row["2024_25_$/mw"].iloc[0] - (10 / 100)) < 1e-6 + assert abs(tas_vic_row["2025_26_$/mw"].iloc[0] - (100 / 100)) < 1e-6 + + +def test_get_least_cost_options_logs_unmatched(caplog): + """ + Test that _get_least_cost_options logs dropped flow_path/option_name pairs from both tables. + """ + # Augmentation table has one extra option not in cost table + aug_table = pd.DataFrame( + { + "id": ["A", "A", "B"], + "option": ["opt1", "opt2", "opt3"], + "nominal_capacity_increase": [100, 200, 300], + } + ) + # Cost table has one extra option not in aug table + cost_table = pd.DataFrame( + { + "id": ["A", "A", "B"], + "option": ["opt1", "opt2", "opt4"], + "2024_25": [10, 20, 30], + "2025_26": [15, 25, 35], + } + ) + # Only the (B, opt3) and (B, opt4) pairs should be dropped + with caplog.at_level("INFO"): + result = _get_least_cost_options(aug_table, cost_table, _FLOW_PATH_CONFIG) + # Check logs for both dropped pairs + assert "Dropped options from augmentation table: [('B', 'opt3')]" in caplog.text + assert "Dropped options from cost table: [('B', 'opt4')]" in caplog.text + + +def test_get_full_flow_path_aug_table_logs_missing_tables(caplog): + """ + Test that _get_augmentation_table logs a warning when augmentation tables are missing. 
+ """ + # Only provide one of the required augmentation tables + present_table = _FLOW_PATH_CONFIG["table_names"]["augmentation"][0] + iasr_tables = { + present_table: pd.DataFrame( + { + "Flow path": ["A"], + "Option Name": ["opt1"], + "forward_capacity_increase": [100], + "reverse_capacity_increase": [90], + } + ) + } + missing = [ + t + for t in _FLOW_PATH_CONFIG["table_names"]["augmentation"] + if t != present_table + ] + with caplog.at_level("WARNING"): + _get_augmentation_table(iasr_tables, _FLOW_PATH_CONFIG) + # Check that the warning about missing tables is logged + assert f"Missing augmentation tables: {missing}" in caplog.text + + +def test_get_cleaned_flow_path_cost_tables_logs_missing_tables(caplog): + """ + Test that _get_cost_table logs a warning when cost tables are missing. + """ + # Only provide one of the required cost tables + cost_scenario = "progressive_change" + cost_table_names = _FLOW_PATH_CONFIG["table_names"]["cost"][cost_scenario] + present_table = cost_table_names[0] + iasr_tables = { + present_table: pd.DataFrame( + { + "id": ["A"], + "option": ["opt1"], + "2024_25": [10], + } + ) + } + missing = [t for t in cost_table_names if t != present_table] + with caplog.at_level("WARNING"): + _get_cost_table(iasr_tables, cost_scenario, _FLOW_PATH_CONFIG) + # Check that the warning about missing tables is logged + assert f"Missing cost tables: {missing}" in caplog.text + + +def test_template_sub_regional_flow_path_costs_invalid_scenario(): + """Test that an invalid scenario raises a ValueError.""" + iasr_tables = { + "flow_path_augmentation_options_NNSW-SQ": pd.DataFrame( + { + "Flow path": ["NNSW-SQ"], + "Option Name": ["Option 1"], + "forward_capacity_increase": [100], + "reverse_capacity_increase": [100], + } + ) + } + + # Test with invalid scenario + with pytest.raises(ValueError, match="scenario: Invalid Scenario not recognised"): + _template_sub_regional_flow_path_costs(iasr_tables, "Invalid Scenario") + + +def test_template_sub_regional_flow_path_costs_no_augmentation_tables(): + """Test that missing all augmentation tables raises a ValueError.""" + iasr_tables = {} # No tables at all + + with pytest.raises(ValueError, match="No flow_path augmentation tables found"): + _template_sub_regional_flow_path_costs(iasr_tables, "Step Change") + + +def test_template_sub_regional_flow_path_costs_no_cost_tables(): + """Test that missing all cost tables raises a ValueError.""" + # Only augmentation tables, no cost tables + iasr_tables = { + "flow_path_augmentation_options_NNSW-SQ": pd.DataFrame( + { + "Flow path": ["NNSW-SQ"], + "Option Name": ["Option 1"], + "forward_capacity_increase": [100], + "reverse_capacity_increase": [100], + } + ) + } + + with pytest.raises(ValueError, match="No cost tables found"): + _template_sub_regional_flow_path_costs(iasr_tables, "Step Change") + + +def test_template_sub_regional_flow_path_costs_no_year_columns(): + """Test that cost tables without year columns raise a ValueError.""" + iasr_tables = { + "flow_path_augmentation_options_NNSW-SQ": pd.DataFrame( + { + "Flow path": ["NNSW-SQ"], + "Option Name": ["Option 1"], + "forward_capacity_increase": [100], + "reverse_capacity_increase": [100], + } + ), + "flow_path_augmentation_costs_step_change_and_green_energy_exports_NNSW-SQ": pd.DataFrame( + { + "Flow path": ["NNSW-SQ"], + "Option Name": ["Option 1"], + # No year columns, only metadata + } + ), + } + + with pytest.raises(ValueError, match="No financial year columns found"): + _template_sub_regional_flow_path_costs(iasr_tables, "Step 
Change") + + +def test_get_prep_activities_missing_flow_path_mapping(csv_str_to_df): + """Test that missing flow path mapping in preparatory activities raises ValueError.""" + # Create preparatory activities with an unmapped flow path + prep_acts_csv = """ + Flow__path, 2024-25, 2025-26 + Unknown__Flow__Path, 100, 110 + """ + prep_acts = csv_str_to_df(prep_acts_csv) + + iasr_tables = { + "flow_path_augmentation_costs_progressive_change_preparatory_activities": prep_acts + } + + # This should raise ValueError about missing mapping + with pytest.raises( + ValueError, + match="Missing mapping values for the flow paths provided: \\['Unknown Flow Path'\\]", + ): + _get_prep_activities_table(iasr_tables, "progressive_change", _FLOW_PATH_CONFIG) + + +def test_get_prep_activities_missing_option_name_mapping(csv_str_to_df): + """Test that missing option name mapping in preparatory activities raises ValueError.""" + # Create a custom config with incomplete mappings + custom_config = _FLOW_PATH_CONFIG.copy() + custom_config["mappings"] = { + "prep_activities_name_to_option": { + "Test Flow Path": "Test Option" # This will map fine + }, + "option_to_id": { + # Missing "Test Option" mapping - this will cause the error + }, + } + + prep_acts_csv = """ + Flow__path, 2024-25, 2025-26 + Test__Flow__Path, 100, 110 + """ + prep_acts = csv_str_to_df(prep_acts_csv) + + iasr_tables = { + "flow_path_augmentation_costs_progressive_change_preparatory_activities": prep_acts + } + + # This should raise ValueError about missing option name mapping + with pytest.raises( + ValueError, + match="Missing mapping values for the option names provided: \\['Test Option'\\]", + ): + _get_prep_activities_table(iasr_tables, "progressive_change", custom_config) + + +def test_get_actionable_projects_missing_flow_path_mapping(csv_str_to_df): + """Test that missing flow path mapping in actionable projects raises ValueError.""" + # Create actionable projects with an unmapped flow path + actionable_csv = """ + Flow__path, 2024-25, 2025-26 + Unknown__Project, 999, 999 + """ + actionable = csv_str_to_df(actionable_csv) + + iasr_tables = { + "flow_path_augmentation_costs_progressive_change_actionable_isp_projects": actionable + } + + # This should raise ValueError about missing mapping + with pytest.raises( + ValueError, + match="Missing mapping values for the flow paths provided: \\['Unknown Project'\\]", + ): + _get_actionable_projects_table( + iasr_tables, "progressive_change", _FLOW_PATH_CONFIG + ) + + +def test_get_actionable_projects_missing_option_name_mapping(csv_str_to_df): + """Test that missing option name mapping in actionable projects raises ValueError.""" + # Create a custom config with incomplete mappings + custom_config = _FLOW_PATH_CONFIG.copy() + custom_config["mappings"] = { + "actionable_name_to_option": { + "Test Project": "Test Option" # This will map fine + }, + "actionable_option_to_id": { + # Missing "Test Option" mapping - this will cause the error + }, + } + + actionable_csv = """ + Flow__path, 2024-25, 2025-26 + Test__Project, 999, 999 + """ + actionable = csv_str_to_df(actionable_csv) + + iasr_tables = { + "flow_path_augmentation_costs_progressive_change_actionable_isp_projects": actionable + } + + # This should raise ValueError about missing option name mapping + with pytest.raises( + ValueError, + match="Missing mapping values for the option names provided: \\['Test Option'\\]", + ): + _get_actionable_projects_table(iasr_tables, "progressive_change", custom_config) + + +def 
test_template_rez_transmission_costs_missing_rez_mapping(csv_str_to_df): + """Test that missing REZ mapping in preparatory activities raises ValueError for REZ config.""" + + # Create REZ preparatory activities with an unmapped REZ + prep_acts_csv = """ + REZ__Name, Option, 2024-25, 2025-26 + Unknown__REZ, Option 1, 100, 110 + """ + prep_acts = csv_str_to_df(prep_acts_csv) + + iasr_tables = { + "rez_augmentation_costs_progressive_change_preparatory_activities": prep_acts + } + + # This should raise ValueError about missing REZ mapping + with pytest.raises( + ValueError, + match="Missing mapping values for the REZ names provided: \\['Unknown REZ'\\]", + ): + _get_prep_activities_table(iasr_tables, "progressive_change", _REZ_CONFIG) + + +def test_find_first_year_with_complete_costs_raises_error_when_no_year_all_non_na( + csv_str_to_df, +): + # Create REZ preparatory activities with an unmapped REZ + prep_acts_csv = """ + id, option, 2024_25, 2025_26 + A, x, 100, 110 + A, y, 100, 110 + B, x, 100, + B, y, , 110 + """ + cost_data = csv_str_to_df(prep_acts_csv) + + year_cols = ["2024_25", "2025_26"] + + # This should raise ValueError about missing REZ mapping + with pytest.raises( + ValueError, + match="No year found with all non-NA costs for transmissions: \\['B'\\]", + ): + _find_first_year_with_complete_costs(cost_data, year_cols) diff --git a/tests/test_templater/test_flow_paths.py b/tests/test_templater/test_flow_paths.py index ad51e7f..4e5ca95 100644 --- a/tests/test_templater/test_flow_paths.py +++ b/tests/test_templater/test_flow_paths.py @@ -1,10 +1,12 @@ from pathlib import Path +import numpy as np import pandas as pd from ispypsa.templater import load_manually_extracted_tables from ispypsa.templater.flow_paths import ( _template_regional_interconnectors, + _template_sub_regional_flow_path_costs, _template_sub_regional_flow_paths, ) @@ -17,28 +19,13 @@ def test_flow_paths_templater_regional(workbook_table_cache_test_path: Path): flow_paths_template = _template_regional_interconnectors( interconnector_capabilities ) + assert all([carrier == "AC" for carrier in flow_paths_template.carrier]) assert all( [ - True - for carrier in flow_paths_template.carrier - if (carrier == "AC" or carrier == "DC") - ] - ) - assert len(flow_paths_template[flow_paths_template.carrier == "DC"]) == 3 - assert all( - [ - True + dtype == np.int64 for dtype in flow_paths_template[ [col for col in flow_paths_template.columns if "mw" in col] ].dtypes - if dtype is int - ] - ) - assert all( - [ - True - for name in ("QNI", "Terranora", "Heywood", "Murraylink", "Basslink") - if name in flow_paths_template.flow_path_name ] ) assert len(flow_paths_template) == 6 @@ -50,33 +37,17 @@ def test_flow_paths_templater_sub_regional(workbook_table_cache_test_path: Path) "flow_path_transfer_capability.csv" ) flow_path_transfer_capability = pd.read_csv(filepath) - manual_tables = load_manually_extracted_tables("6.0") flow_paths_template = _template_sub_regional_flow_paths( - flow_path_transfer_capability, manual_tables["transmission_expansion_costs"] + flow_path_transfer_capability ) + assert all([carrier == "AC" for carrier in flow_paths_template.carrier]) assert all( [ - True - for carrier in flow_paths_template.carrier - if (carrier == "AC" or carrier == "DC") - ] - ) - assert len(flow_paths_template[flow_paths_template.carrier == "DC"]) == 3 - assert all( - [ - True + dtype == np.int64 for dtype in flow_paths_template[ [col for col in flow_paths_template.columns if "mw" in col] ].dtypes - if dtype is int - ] - ) - assert 
all( - [ - True - for name in ("QNI", "Terranora", "Heywood", "Murraylink", "Basslink") - if name in flow_paths_template.flow_path_name ] ) - assert len(flow_paths_template) == 14 + assert len(flow_paths_template) == 12 assert len(flow_paths_template.columns) == 6 diff --git a/tests/test_templater/test_renewable_energy_zones.py b/tests/test_templater/test_renewable_energy_zones.py index 5960314..65194cc 100644 --- a/tests/test_templater/test_renewable_energy_zones.py +++ b/tests/test_templater/test_renewable_energy_zones.py @@ -47,6 +47,3 @@ def test_renewable_energy_zone_build_limits(workbook_table_cache_test_path: Path # assert pd.Series( # build_limits.rez_transmission_network_limit_winter_reference.values # ).equals(pd.Series([np.nan, 700.0, 3000.0, 2000.0, np.nan, 0.0])) - assert pd.Series( - build_limits["indicative_transmission_expansion_cost_$/mw"].values - ).equals(pd.Series([1420000.0, 430000.0, 700000.0, np.nan, np.nan, 1000000.0])) diff --git a/tests/test_templater/test_rez_transmission_costs.py b/tests/test_templater/test_rez_transmission_costs.py new file mode 100644 index 0000000..c2b9882 --- /dev/null +++ b/tests/test_templater/test_rez_transmission_costs.py @@ -0,0 +1,240 @@ +import numpy as np +import pandas as pd +import pytest + +from ispypsa.templater.flow_paths import ( + _get_augmentation_table, + _get_cost_table, + _get_least_cost_options, + _template_rez_transmission_costs, + process_transmission_costs, +) +from ispypsa.templater.mappings import ( + _REZ_CONFIG, + _REZ_PREPATORY_ACTIVITIES_NAME_TO_REZ_AND_OPTION_NAME, +) + + +def test_template_rez_transmission_costs_simple_least_cost_option(): + # Augmentation tables for SWQLD1 and SWV1 REZs + aug_table_swqld = pd.DataFrame( + { + "REZ constraint ID": ["SWQLD1", "SWQLD1", "SWQLD1"], + "Option": ["Option 1", "Option 2", "Option 3"], + "Additional network capacity (MW)": [100, 200, 40], + } + ) + aug_table_swv = pd.DataFrame( + { + "REZ constraint ID": ["SWV1", "SWV1", "SWV1"], + "Option": [ + "Option 1A", + "Option 1B", + "Option 2A", + ], + "Additional network capacity (MW)": [150, 70, 120], + } + ) + # Cost tables for SWQLD1 and SWV1 REZs + # Option 2 is least cost and has the largest increase so should be chosen. + cost_table_swqld = pd.DataFrame( + { + "REZ constraint ID": ["SWQLD1", "SWQLD1", "SWQLD1"], + "Option": ["Option 1", "Option 2", "Option 3"], + "2024_25": [50, 40, 60], + "2025_26": [55, 45, 65], + } + ) + # Option 1A is least cost and has the largest increase so should be chosen. 
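+    # (Option 1A costs 70 in 2024_25 versus 80 and 100 for the alternatives, and its 150 MW
+    # capacity is the largest of the SWV1 options, so the $/mw values should be 70/150 and 75/150.)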
+ cost_table_swv = pd.DataFrame( + { + "REZ constraint ID": ["SWV1", "SWV1", "SWV1"], + "Option": ["Option 1A", "Option 1B", "Option 2A"], + "2024_25": [70, 80, 100], + "2025_26": [75, 85, 110], + } + ) + # Preparatory activities table (should not be chosen due to higher costs) + # Using entries that exist in _REZ_PREPATORY_ACTIVITIES_NAME_TO_REZ_AND_OPTION_NAME + prep_acts = pd.DataFrame( + { + "REZ": [ + "Darling Downs REZ Expansion(Stage 1)", + "South West Victoria REZ Option 1A", + ], + "2024_25": [100, 110], + "2025_26": [110, 120], + } + ) + + # Compose iasr_tables dict with correct table names + iasr_tables = { + "rez_augmentation_options_QLD": aug_table_swqld, + "rez_augmentation_options_VIC": aug_table_swv, + "rez_augmentation_costs_progressive_change_QLD": cost_table_swqld, + "rez_augmentation_costs_progressive_change_VIC": cost_table_swv, + "rez_augmentation_costs_progressive_change_preparatory_activities": prep_acts, + } + scenario = "Progressive Change" + # Run function + result = _template_rez_transmission_costs(iasr_tables, scenario, ["SWV1", "SWQLD1"]) + # Check least cost options are chosen for SWQLD1 and SWV1 + swqld_row = result[result["rez_constraint_id"] == "SWQLD1"] + swv_row = result[result["rez_constraint_id"] == "SWV1"] + assert swqld_row["option"].iloc[0] == "Option 2" + assert swv_row["option"].iloc[0] == "Option 1A" + # Check additional_network_capacity_mw is correct + assert swqld_row["additional_network_capacity_mw"].iloc[0] == 200 + assert swv_row["additional_network_capacity_mw"].iloc[0] == 150 + # Check cost per year column is correct (cost divided by capacity) + # For SWQLD1 Option 2: 2024_25 = 40/200 = 0.2, 2025_26 = 45/200 = 0.225 + # For SWV1 Option 1A: 2024_25 = 70/150 ≈ 0.4667, 2025_26 = 75/150 = 0.5 + assert abs(swqld_row["2024_25_$/mw"].iloc[0] - 0.2) < 1e-6 + assert abs(swqld_row["2025_26_$/mw"].iloc[0] - 0.225) < 1e-6 + assert abs(swv_row["2024_25_$/mw"].iloc[0] - (70 / 150)) < 1e-6 + assert abs(swv_row["2025_26_$/mw"].iloc[0] - 0.5) < 1e-6 + + +def test_template_rez_transmission_costs_prep_activities_chosen(): + """ + The cost of the non preparatory activities have been made very high + and therefore preparatory activities should be chosen. 
+ """ + # Augmentation tables for SWQLD1 and SWV1 REZs + aug_table_swqld = pd.DataFrame( + { + "REZ constraint ID": ["SWQLD1", "SWQLD1", "SWQLD1"], + "Option": ["Option 1", "Option 2", "Option 3"], + "Additional network capacity (MW)": [100, 150, 200], + } + ) + aug_table_swv = pd.DataFrame( + { + "REZ constraint ID": ["SWV1", "SWV1", "SWV1"], + "Option": ["Option 1A", "Option 1B", "Option 2A"], + "Additional network capacity (MW)": [140, 150, 160], + } + ) + # Standard cost tables - options that have costs in prep activities should have NaN here + cost_table_swqld = pd.DataFrame( + { + "REZ constraint ID": ["SWQLD1", "SWQLD1", "SWQLD1"], + "Option": ["Option 1", "Option 2", "Option 3"], + "2024_25": [ + np.nan, + 1000, + 1000, + ], # Option 1 has NaN since it's in prep activities + "2025_26": [np.nan, 1000, 1000], + } + ) + cost_table_swv = pd.DataFrame( + { + "REZ constraint ID": ["SWV1", "SWV1", "SWV1"], + "Option": ["Option 1A", "Option 1B", "Option 2A"], + "2024_25": [ + 1000, + 1000, + np.nan, + ], # Option 2A has NaN since it's in prep activities + "2025_26": [1000, 1000, np.nan], + } + ) + # Preparatory activities table (set low cost) + # Using entries that exist in _REZ_PREPATORY_ACTIVITIES_NAME_TO_REZ_AND_OPTION_NAME + prep_acts = pd.DataFrame( + { + "REZ": [ + "Darling Downs REZ Expansion(Stage 1)", + "South West Victoria REZ Option 2A", + ], + "2024_25": [10, 15], + "2025_26": [20, 25], + } + ) + + # Compose iasr_tables dict + iasr_tables = { + "rez_augmentation_options_QLD": aug_table_swqld, + "rez_augmentation_options_VIC": aug_table_swv, + "rez_augmentation_costs_progressive_change_QLD": cost_table_swqld, + "rez_augmentation_costs_progressive_change_VIC": cost_table_swv, + "rez_augmentation_costs_progressive_change_preparatory_activities": prep_acts, + } + scenario = "Progressive Change" + # Run function + result = _template_rez_transmission_costs(iasr_tables, scenario, ["SWV1", "SWQLD1"]) + # Check that the prep activity is chosen for SWQLD1 and SWV1 + swqld_row = result[result["rez_constraint_id"] == "SWQLD1"] + swv_row = result[result["rez_constraint_id"] == "SWV1"] + assert swqld_row["option"].iloc[0] == "Option 1" + assert swv_row["option"].iloc[0] == "Option 2A" + # Check additional_network_capacity_mw is correct + assert swqld_row["additional_network_capacity_mw"].iloc[0] == 100 + assert swv_row["additional_network_capacity_mw"].iloc[0] == 160 + # Check cost per year column is correct (cost divided by capacity) + assert abs(swqld_row["2024_25_$/mw"].iloc[0] - (10 / 100)) < 1e-6 + assert abs(swqld_row["2025_26_$/mw"].iloc[0] - (20 / 100)) < 1e-6 + assert abs(swv_row["2024_25_$/mw"].iloc[0] - (15 / 160)) < 1e-6 + assert abs(swv_row["2025_26_$/mw"].iloc[0] - (25 / 160)) < 1e-6 + + +def test_template_rez_transmission_costs_use_first_year_with_valid_costs(): + """ + Test that the first year with non-nan cost data for all options is used. + """ + # SWQLD1: only 2025_26 has all non-nan costs + aug_table_swqld = pd.DataFrame( + { + "REZ constraint ID": ["SWQLD1", "SWQLD1", "SWQLD1"], + "Option": ["Option 1", "Option 2", "Option 3"], + "Additional network capacity (MW)": [150, 150, 150], + } + ) + # Even though option 3 is cheaper than option 2 in 2024_25, option 1 should get + # chosen because 2025_26 is used as the comparison year and it has the lowest cost there. 
+ cost_table_swqld = pd.DataFrame( + { + "REZ constraint ID": ["SWQLD1", "SWQLD1", "SWQLD1"], + "Option": ["Option 1", "Option 2", "Option 3"], + "2024_25": [np.nan, 50, 10], + "2025_26": [35, 45, 50], + } + ) + # SWV1: all years have valid costs + aug_table_swv = pd.DataFrame( + { + "REZ constraint ID": ["SWV1", "SWV1"], + "Option": ["Option 1A", "Option 1B"], + "Additional network capacity (MW)": [90, 100], + } + ) + cost_table_swv = pd.DataFrame( + { + "REZ constraint ID": ["SWV1", "SWV1"], + "Option": ["Option 1A", "Option 1B"], + "2024_25": [100, 10], + "2025_26": [10, 100], + } + ) + iasr_tables = { + "rez_augmentation_options_QLD": aug_table_swqld, + "rez_augmentation_options_VIC": aug_table_swv, + "rez_augmentation_costs_progressive_change_QLD": cost_table_swqld, + "rez_augmentation_costs_progressive_change_VIC": cost_table_swv, + } + scenario = "Progressive Change" + result = _template_rez_transmission_costs(iasr_tables, scenario, ["SWV1", "SWQLD1"]) + # SWQLD1: Only 2025_26 has all non-nan costs, so selection is based on that year for all years + swqld_row = result[result["rez_constraint_id"] == "SWQLD1"] + assert swqld_row["option"].iloc[0] == "Option 1" + assert swqld_row["additional_network_capacity_mw"].iloc[0] == 150 + assert np.isnan(swqld_row["2024_25_$/mw"].iloc[0]) + assert abs(swqld_row["2025_26_$/mw"].iloc[0] - (35 / 150)) < 1e-6 + # SWV1: both years valid, Option 1B is the least cost only in first, + # but should be chosen on this basis. + swv_row = result[result["rez_constraint_id"] == "SWV1"] + assert swv_row["option"].iloc[0] == "Option 1B" + assert swv_row["additional_network_capacity_mw"].iloc[0] == 100 + assert abs(swv_row["2024_25_$/mw"].iloc[0] - (10 / 100)) < 1e-6 + assert abs(swv_row["2025_26_$/mw"].iloc[0] - (100 / 100)) < 1e-6 diff --git a/tests/test_translator/test_create_expansion_limit_constraints.py b/tests/test_translator/test_create_expansion_limit_constraints.py new file mode 100644 index 0000000..78c6a3c --- /dev/null +++ b/tests/test_translator/test_create_expansion_limit_constraints.py @@ -0,0 +1,380 @@ +import pandas as pd +import pytest + +from ispypsa.translator.custom_constraints import _create_expansion_limit_constraints + + +def test_create_expansion_limit_constraints_basic(csv_str_to_df): + """Test basic creation of expansion limit constraints for links and flow paths.""" + + # Input: links with some extendable + links_csv = """ + isp_name, name, carrier, bus0, bus1, p_nom, p_nom_extendable + PATH1, PATH1_existing, AC, BUS1, BUS2, 1000, False + PATH1, PATH1_exp_2025, AC, BUS1, BUS2, 0, True + PATH2, PATH2_exp_2025, AC, BUS3, BUS4, 0, True + """ + links = csv_str_to_df(links_csv) + + # Input: flow path expansion costs + flow_paths_csv = """ + flow_path, additional_network_capacity_mw + PATH1, 500 + PATH2, 800 + """ + flow_paths = csv_str_to_df(flow_paths_csv) + + # No constraint generators or REZ connections + constraint_generators = None + rez_connections = None + + # Call the function under test + lhs, rhs = _create_expansion_limit_constraints( + links, constraint_generators, flow_paths, rez_connections + ) + + # Expected LHS output (only extendable links) + expected_lhs_csv = """ + constraint_name, variable_name, component, attribute, coefficient + PATH1_expansion_limit, PATH1_exp_2025, Link, p_nom, 1.0 + PATH2_expansion_limit, PATH2_exp_2025, Link, p_nom, 1.0 + """ + expected_lhs = csv_str_to_df(expected_lhs_csv) + + # Expected RHS output + expected_rhs_csv = """ + constraint_name, rhs, constraint_type + PATH1_expansion_limit, 500, <= + 
PATH2_expansion_limit, 800, <= + """ + expected_rhs = csv_str_to_df(expected_rhs_csv) + + # Compare DataFrames + pd.testing.assert_frame_equal( + lhs.sort_values(["constraint_name", "variable_name"]).reset_index(drop=True), + expected_lhs.sort_values(["constraint_name", "variable_name"]).reset_index( + drop=True + ), + ) + + pd.testing.assert_frame_equal( + rhs.sort_values("constraint_name").reset_index(drop=True), + expected_rhs.sort_values("constraint_name").reset_index(drop=True), + ) + + +def test_create_expansion_limit_constraints_with_generators(csv_str_to_df): + """Test creation of expansion limit constraints with constraint generators.""" + + # Input: links with some extendable + links_csv = """ + isp_name, name, carrier, bus0, bus1, p_nom, p_nom_extendable + PATH1, PATH1_exp_2025, AC, BUS1, BUS2, 0, True + """ + links = csv_str_to_df(links_csv) + + # Input: constraint generators + constraint_generators_csv = """ + constraint_name, name, isp_name, bus, p_nom, p_nom_extendable + REZ_NSW, REZ_NSW_exp_2025, REZ_NSW, bus_for_custom_constraint_gens, 0.0, True + REZ_NSW, REZ_NSW_exp_2030, REZ_NSW, bus_for_custom_constraint_gens, 0.0, True + REZ_VIC, REZ_VIC_exp_2025, REZ_VIC, bus_for_custom_constraint_gens, 0.0, True + """ + constraint_generators = csv_str_to_df(constraint_generators_csv) + + # Input: flow paths and REZ connections + flow_paths_csv = """ + flow_path, additional_network_capacity_mw + PATH1, 500 + """ + flow_paths = csv_str_to_df(flow_paths_csv) + + rez_connections_csv = """ + rez_constraint_id, additional_network_capacity_mw + REZ_NSW, 1000 + REZ_VIC, 750 + """ + rez_connections = csv_str_to_df(rez_connections_csv) + + # Call the function under test + lhs, rhs = _create_expansion_limit_constraints( + links, constraint_generators, flow_paths, rez_connections + ) + + # Expected LHS output (links and generators) + expected_lhs_csv = """ + constraint_name, variable_name, component, attribute, coefficient + PATH1_expansion_limit, PATH1_exp_2025, Link, p_nom, 1.0 + REZ_NSW_expansion_limit, REZ_NSW_exp_2025, Generator, p_nom, 1.0 + REZ_NSW_expansion_limit, REZ_NSW_exp_2030, Generator, p_nom, 1.0 + REZ_VIC_expansion_limit, REZ_VIC_exp_2025, Generator, p_nom, 1.0 + """ + expected_lhs = csv_str_to_df(expected_lhs_csv) + + # Expected RHS output + expected_rhs_csv = """ + constraint_name, rhs, constraint_type + PATH1_expansion_limit, 500, <= + REZ_NSW_expansion_limit, 1000, <= + REZ_VIC_expansion_limit, 750, <= + """ + expected_rhs = csv_str_to_df(expected_rhs_csv) + + # Compare DataFrames + pd.testing.assert_frame_equal( + lhs.sort_values(["constraint_name", "variable_name"]).reset_index(drop=True), + expected_lhs.sort_values(["constraint_name", "variable_name"]).reset_index( + drop=True + ), + ) + + pd.testing.assert_frame_equal( + rhs.sort_values("constraint_name").reset_index(drop=True), + expected_rhs.sort_values("constraint_name").reset_index(drop=True), + ) + + +def test_create_expansion_limit_constraints_no_extendable_links(csv_str_to_df): + """Test when no links are extendable.""" + + # Input: links with none extendable + links_csv = """ + isp_name, name, carrier, bus0, bus1, p_nom, p_nom_extendable + PATH1, PATH1_existing, AC, BUS1, BUS2, 1000, False + PATH2, PATH2_existing, AC, BUS3, BUS4, 2000, False + """ + links = csv_str_to_df(links_csv) + + # Input: flow paths (but no extendable links to constrain) + flow_paths_csv = """ + flow_path, additional_network_capacity_mw + PATH1, 500 + PATH2, 800 + """ + flow_paths = csv_str_to_df(flow_paths_csv) + + # Call the function 
under test + lhs, rhs = _create_expansion_limit_constraints(links, None, flow_paths, None) + + # Should return empty DataFrames since no links are extendable + assert lhs.empty + assert rhs.empty + + +def test_create_expansion_limit_constraints_filter_rhs_by_lhs(csv_str_to_df): + """Test that RHS is filtered to only include constraints with corresponding LHS.""" + + # Input: links with only PATH1 extendable + links_csv = """ + isp_name, name, carrier, bus0, bus1, p_nom, p_nom_extendable + PATH1, PATH1_exp_2025, AC, BUS1, BUS2, 0, True + PATH2, PATH2_existing, AC, BUS3, BUS4, 2000, False + """ + links = csv_str_to_df(links_csv) + + # Input: flow paths for both PATH1 and PATH2 + flow_paths_csv = """ + flow_path, additional_network_capacity_mw + PATH1, 500 + PATH2, 800 + """ + flow_paths = csv_str_to_df(flow_paths_csv) + + # Call the function under test + lhs, rhs = _create_expansion_limit_constraints(links, None, flow_paths, None) + + # Expected LHS output (only PATH1) + expected_lhs_csv = """ + constraint_name, variable_name, component, attribute, coefficient + PATH1_expansion_limit, PATH1_exp_2025, Link, p_nom, 1.0 + """ + expected_lhs = csv_str_to_df(expected_lhs_csv) + + # Expected RHS output (only PATH1, PATH2 filtered out) + expected_rhs_csv = """ + constraint_name, rhs, constraint_type + PATH1_expansion_limit, 500, <= + """ + expected_rhs = csv_str_to_df(expected_rhs_csv) + + # Compare DataFrames + pd.testing.assert_frame_equal( + lhs.sort_values(["constraint_name", "variable_name"]).reset_index(drop=True), + expected_lhs.sort_values(["constraint_name", "variable_name"]).reset_index( + drop=True + ), + ) + + pd.testing.assert_frame_equal( + rhs.sort_values("constraint_name").reset_index(drop=True), + expected_rhs.sort_values("constraint_name").reset_index(drop=True), + ) + + +def test_create_expansion_limit_constraints_all_none(csv_str_to_df): + """Test when all inputs are None.""" + + # Call the function under test with all None + lhs, rhs = _create_expansion_limit_constraints(None, None, None, None) + + # Should return empty DataFrames + assert lhs.empty + assert rhs.empty + + +def test_create_expansion_limit_constraints_empty_inputs(csv_str_to_df): + """Test when all inputs are empty DataFrames.""" + + # Call the function under test with empty DataFrames + lhs, rhs = _create_expansion_limit_constraints( + pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame() + ) + + # Should return empty DataFrames + assert lhs.empty + assert rhs.empty + + +def test_create_expansion_limit_constraints_mixed_empty_none(csv_str_to_df): + """Test with mix of None and empty inputs. 
+ + This output is expect from _create_expansion_limit_constraints, but will raise an error at the + validation stage in _translate_custom_constraints + """ + + # Input: extendable links + links_csv = """ + isp_name, name, carrier, bus0, bus1, p_nom, p_nom_extendable + PATH1, PATH1_exp_2025, AC, BUS1, BUS2, 0, True + """ + links = csv_str_to_df(links_csv) + + # Flow paths is None, REZ connections is empty + flow_paths = None + rez_connections = pd.DataFrame() + + # Call the function under test + lhs, rhs = _create_expansion_limit_constraints( + links, None, flow_paths, rez_connections + ) + + # LHS should have the link constraint but RHS should be empty (no RHS data) + expected_lhs_csv = """ + constraint_name, variable_name, component, attribute, coefficient + PATH1_expansion_limit, PATH1_exp_2025, Link, p_nom, 1.0 + """ + expected_lhs = csv_str_to_df(expected_lhs_csv) + + pd.testing.assert_frame_equal( + lhs.sort_values(["constraint_name", "variable_name"]).reset_index(drop=True), + expected_lhs.sort_values(["constraint_name", "variable_name"]).reset_index( + drop=True + ), + ) + + # RHS should be empty since no RHS data was provided + assert rhs.empty + + +def test_create_expansion_limit_constraints_only_rhs_no_lhs(csv_str_to_df): + """Test when RHS data exists but no LHS (no extendable components).""" + + # Input: no links + links = None + + # Input: flow paths + flow_paths_csv = """ + flow_path, additional_network_capacity_mw + PATH1, 500 + PATH2, 800 + """ + flow_paths = csv_str_to_df(flow_paths_csv) + + # Call the function under test + lhs, rhs = _create_expansion_limit_constraints(links, None, flow_paths, None) + + # Should return empty DataFrames since there's no LHS + assert lhs.empty + assert rhs.empty + + +def test_create_expansion_limit_constraints_missing_rhs_columns(csv_str_to_df): + """Test that ValueError is raised when RHS data is missing expected columns.""" + + # Input: links + links_csv = """ + isp_name, name, carrier, bus0, bus1, p_nom, p_nom_extendable + PATH1, PATH1_exp_2025, AC, BUS1, BUS2, 0, True + """ + links = csv_str_to_df(links_csv) + + # Input: flow paths with wrong column names + flow_paths_csv = """ + wrong_column, wrong_capacity_column + PATH1, 500 + """ + flow_paths = csv_str_to_df(flow_paths_csv) + + # Should raise ValueError due to missing required columns + with pytest.raises( + ValueError, + match="RHS components missing required columns after processing: \\['constraint_name', 'rhs'\\]", + ): + _create_expansion_limit_constraints(links, None, flow_paths, None) + + +def test_create_expansion_limit_constraints_duplicate_constraints(csv_str_to_df): + """Test handling of duplicate constraint names from different sources.""" + + # Input: links + links_csv = """ + isp_name, name, carrier, bus0, bus1, p_nom, p_nom_extendable + TEST1, TEST1_exp_2025, AC, BUS1, BUS2, 0, True + """ + links = csv_str_to_df(links_csv) + + # Input: flow paths and REZ connections with same constraint name + flow_paths_csv = """ + flow_path, additional_network_capacity_mw + TEST1, 500 + """ + flow_paths = csv_str_to_df(flow_paths_csv) + + rez_connections_csv = """ + rez_constraint_id, additional_network_capacity_mw + TEST1, 1000 + """ + rez_connections = csv_str_to_df(rez_connections_csv) + + # Call the function under test + lhs, rhs = _create_expansion_limit_constraints( + links, None, flow_paths, rez_connections + ) + + # Expected LHS output + expected_lhs_csv = """ + constraint_name, variable_name, component, attribute, coefficient + TEST1_expansion_limit, TEST1_exp_2025, Link, 
p_nom, 1.0 + """ + expected_lhs = csv_str_to_df(expected_lhs_csv) + + # Expected RHS output (both entries concatenated) + expected_rhs_csv = """ + constraint_name, rhs, constraint_type + TEST1_expansion_limit, 500, <= + TEST1_expansion_limit, 1000, <= + """ + expected_rhs = csv_str_to_df(expected_rhs_csv) + + # Compare DataFrames + pd.testing.assert_frame_equal( + lhs.sort_values(["constraint_name", "variable_name"]).reset_index(drop=True), + expected_lhs.sort_values(["constraint_name", "variable_name"]).reset_index( + drop=True + ), + ) + + pd.testing.assert_frame_equal( + rhs.sort_values(["constraint_name", "rhs"]).reset_index(drop=True), + expected_rhs.sort_values(["constraint_name", "rhs"]).reset_index(drop=True), + ) diff --git a/tests/test_translator/test_create_pypsa_friendly_inputs.py b/tests/test_translator/test_create_pypsa_friendly_inputs.py index 187249c..29301c3 100644 --- a/tests/test_translator/test_create_pypsa_friendly_inputs.py +++ b/tests/test_translator/test_create_pypsa_friendly_inputs.py @@ -2,7 +2,7 @@ import pandas as pd -from ispypsa.config import load_config +from ispypsa.config import ModelConfig, load_config from ispypsa.data_fetch import read_csvs from ispypsa.templater import ( create_ispypsa_inputs_template, @@ -125,68 +125,49 @@ def test_create_pypsa_friendly_snapshots_operational(): assert len(snapshots) == 1344 -def test_create_pypsa_inputs_template_sub_regions(workbook_table_cache_test_path: Path): - iasr_tables = read_csvs(workbook_table_cache_test_path) - manual_tables = load_manually_extracted_tables("6.0") - config = load_config(Path(__file__).parent / Path("ispypsa_config.yaml")) - template_tables = create_ispypsa_inputs_template( - config.scenario, - config.network.nodes.regional_granularity, - iasr_tables, - manual_tables, +def test_create_pypsa_inputs_template_sub_regions( + sample_model_config: ModelConfig, + sample_ispypsa_tables: dict[str, pd.DataFrame], +): + pypsa_tables = create_pypsa_friendly_inputs( + sample_model_config, sample_ispypsa_tables ) - pypsa_tables = create_pypsa_friendly_inputs(config, template_tables) - - for table in list_translator_output_files(): - assert table in pypsa_tables.keys() - assert "SQ" in pypsa_tables["buses"]["name"].values - assert "Q1" in pypsa_tables["buses"]["name"].values + assert "CNSW" in pypsa_tables["buses"]["name"].values + assert "Central-West Orana REZ" in pypsa_tables["buses"]["name"].values def test_create_pypsa_inputs_template_sub_regions_rezs_not_nodes( - workbook_table_cache_test_path: Path, + sample_model_config: ModelConfig, + sample_ispypsa_tables: dict[str, pd.DataFrame], ): - iasr_tables = read_csvs(workbook_table_cache_test_path) - manual_tables = load_manually_extracted_tables("6.0") - config = load_config(Path(__file__).parent / Path("ispypsa_config.yaml")) - config.network.nodes.rezs = "attached_to_parent_node" - template_tables = create_ispypsa_inputs_template( - config.scenario, - config.network.nodes.regional_granularity, - iasr_tables, - manual_tables, + sample_model_config.network.nodes.rezs = "attached_to_parent_node" + pypsa_tables = create_pypsa_friendly_inputs( + sample_model_config, sample_ispypsa_tables ) - pypsa_tables = create_pypsa_friendly_inputs(config, template_tables) for table in list_translator_output_files(): assert table in pypsa_tables.keys() - assert "SQ" in pypsa_tables["buses"]["name"].values - assert "Q1" not in pypsa_tables["buses"]["name"].values + assert "CNSW" in pypsa_tables["buses"]["name"].values + assert "Central-West Orana REZ" not in 
pypsa_tables["buses"]["name"].values def test_create_ispypsa_inputs_template_single_regions( - workbook_table_cache_test_path: Path, + sample_ispypsa_tables: dict[str, pd.DataFrame], + sample_model_config: ModelConfig, ): - iasr_tables = read_csvs(workbook_table_cache_test_path) - manual_tables = load_manually_extracted_tables("6.0") - config = load_config(Path(__file__).parent / Path("ispypsa_config.yaml")) - config.network.nodes.regional_granularity = "single_region" - config.network.nodes.rezs = "attached_to_parent_node" - template_tables = create_ispypsa_inputs_template( - config.scenario, - config.network.nodes.regional_granularity, - iasr_tables, - manual_tables, + sample_model_config.network.nodes.regional_granularity = "single_region" + sample_model_config.network.nodes.rezs = "attached_to_parent_node" + pypsa_tables = create_pypsa_friendly_inputs( + sample_model_config, sample_ispypsa_tables ) - pypsa_tables = create_pypsa_friendly_inputs(config, template_tables) for table in list_translator_output_files(): assert table in pypsa_tables.keys() assert "NEM" in pypsa_tables["buses"]["name"].values - assert pypsa_tables["lines"].empty + assert pypsa_tables["links"].empty class DummyConfigTwo: diff --git a/tests/test_translator/test_investment_period_weighting.py b/tests/test_translator/test_investment_period_weighting.py index 04b134d..ba3a2cc 100644 --- a/tests/test_translator/test_investment_period_weighting.py +++ b/tests/test_translator/test_investment_period_weighting.py @@ -15,11 +15,11 @@ def test_create_investment_period_weightings_basic(): expected = pd.DataFrame( { "period": [2020, 2030, 2040], - "years": [10, 10, 10], + "years": [10, 10, 11], "objective": [ sum([(1 / (1 + 0.05) ** t) for t in range(0, 10)]), sum([(1 / (1 + 0.05) ** t) for t in range(10, 20)]), - sum([(1 / (1 + 0.05) ** t) for t in range(20, 30)]), + sum([(1 / (1 + 0.05) ** t) for t in range(20, 31)]), ], } ) @@ -44,11 +44,11 @@ def test_create_investment_period_weightings_variable_length(): expected = pd.DataFrame( { "period": [2020, 2025, 2035], - "years": [5, 10, 15], + "years": [5, 10, 16], "objective": [ sum([(1 / (1 + 0.05) ** t) for t in range(0, 5)]), sum([(1 / (1 + 0.05) ** t) for t in range(5, 15)]), - sum([(1 / (1 + 0.05) ** t) for t in range(15, 30)]), + sum([(1 / (1 + 0.05) ** t) for t in range(15, 31)]), ], } ) @@ -73,8 +73,8 @@ def test_create_investment_period_weightings_zero_discount(): expected = pd.DataFrame( { "period": [2020, 2030], - "years": [10, 10], - "objective": [10.0, 10.0], # Weight equals years with no discounting + "years": [10, 11], + "objective": [10.0, 11.0], # Weight equals years with no discounting } ) @@ -98,8 +98,8 @@ def test_create_investment_period_weightings_single_period(): expected = pd.DataFrame( { "period": [2020], - "years": [10], - "objective": [sum([(1 / (1 + 0.05) ** t) for t in range(0, 10)])], + "years": [11], + "objective": [sum([(1 / (1 + 0.05) ** t) for t in range(0, 11)])], } ) @@ -123,10 +123,10 @@ def test_create_investment_period_weightings_alternative_discount(): expected = pd.DataFrame( { "period": [2020, 2025], - "years": [5, 5], + "years": [5, 6], "objective": [ sum([(1 / (1 + 0.10) ** t) for t in range(0, 5)]), - sum([(1 / (1 + 0.10) ** t) for t in range(5, 10)]), + sum([(1 / (1 + 0.10) ** t) for t in range(5, 11)]), ], } ) @@ -159,14 +159,14 @@ def test_create_investment_period_weightings_trivial_discount(): # With r = 1.0, the discounted weights are: # Period 1 (2020-2022): [1, 0.5] = 1.5 - # Period 2 (2022-2024): [0.25, 0.125] = 0.375 + # 
Period 2 (2022-2024): [0.25, 0.125, 0.0625] = 0.4375 # Expected result with manually calculated values expected = pd.DataFrame( { "period": [2020, 2022], - "years": [2, 2], - "objective": [1.5, 0.375], # Manually verified + "years": [2, 3], + "objective": [1.5, 0.4375], # Manually verified } ) diff --git a/tests/test_translator/test_links.py b/tests/test_translator/test_links.py new file mode 100644 index 0000000..fcb50bf --- /dev/null +++ b/tests/test_translator/test_links.py @@ -0,0 +1,389 @@ +import io +import re + +import pandas as pd +import pytest + +from ispypsa.translator.links import ( + _translate_existing_flow_path_capacity_to_links, + _translate_expansion_costs_to_links, + _translate_flow_paths_to_links, +) + + +def test_translate_existing_flow_path_capacity_to_links(csv_str_to_df): + """Test that existing flow paths are correctly translated to links.""" + # Create sample data for testing + existing_flow_paths_csv = """ + flow_path, carrier, node_from, node_to, forward_direction_mw_summer_typical, reverse_direction_mw_summer_typical + PathA-PathB, AC, NodeA, NodeB, 1000, 1000 + PathB-PathC, AC, NodeB, NodeC, 2000, 2000 + """ + existing_flow_paths = csv_str_to_df(existing_flow_paths_csv) + + # Expected result + expected_links_csv = """ + isp_name, name, carrier, bus0, bus1, p_nom, p_min_pu, build_year, lifetime, capital_cost, p_nom_extendable + PathA-PathB, PathA-PathB_existing, AC, NodeA, NodeB, 1000, -1.0, 0, INF, , False + PathB-PathC, PathB-PathC_existing, AC, NodeB, NodeC, 2000, -1.0, 0, INF, , False + """ + expected_links = csv_str_to_df(expected_links_csv) + expected_links["capital_cost"] = pd.to_numeric( + expected_links["capital_cost"], errors="coerce" + ) + + # Convert the flow paths to links + result = _translate_existing_flow_path_capacity_to_links(existing_flow_paths) + + # Assert the results match expectations + pd.testing.assert_frame_equal( + result.sort_index(axis=1), expected_links.sort_index(axis=1) + ) + + +def test_translate_expansion_costs_to_links(csv_str_to_df): + """Test that flow path expansion costs are correctly translated to links.""" + # Create sample data for testing + flow_path_expansion_costs_csv = """ + flow_path, additional_network_capacity_mw, 2025_26_$/mw, 2026_27_$/mw + NodeA-NodeB, 500, , 1200 + NodeB-NodeC, 800, 1500, 1800 + """ + flow_path_expansion_costs = csv_str_to_df(flow_path_expansion_costs_csv) + + existing_links_csv = """ + isp_name, name, carrier, bus0, bus1, p_nom + NodeA-NodeB, NodeA-NodeB_existing, AC, NodeA, NodeB, 1000 + NodeB-NodeC, NodeB-NodeC_existing, AC, NodeB, NodeC, 2000 + """ + existing_links_df = csv_str_to_df(existing_links_csv) + + investment_periods = [2026, 2027] + year_type = "fy" + wacc = 0.07 + asset_lifetime = 30 + + result = _translate_expansion_costs_to_links( + flow_path_expansion_costs, + existing_links_df, + investment_periods, + year_type, + wacc, + asset_lifetime, + id_column="flow_path", + match_column="isp_name", + ) + + # Expected result structure - use a fixed capital_cost for assertion purposes + # The actual values depend on the annuitization formula + expected_result_csv = """ + isp_name, name, bus0, bus1, carrier, p_nom, p_nom_extendable, build_year, lifetime + NodeB-NodeC, NodeB-NodeC_exp_2026, NodeB, NodeC, AC, 0.0, True, 2026, INF + NodeA-NodeB, NodeA-NodeB_exp_2027, NodeA, NodeB, AC, 0.0, True, 2027, INF + NodeB-NodeC, NodeB-NodeC_exp_2027, NodeB, NodeC, AC, 0.0, True, 2027, INF + """ + expected_result = csv_str_to_df(expected_result_csv) + + # Sort both result and expected result for 
comparison + result = result.sort_values(["name"]).reset_index(drop=True) + expected_result = expected_result.sort_values(["name"]).reset_index(drop=True) + + # Check capital costs separately - should be greater than 0 + assert all(result["capital_cost"] > 0) + result = result.drop(columns="capital_cost") + + # Check that column names match + assert set(expected_result.columns).issubset(set(result.columns)) + + # Check all columns except capital_cost (which uses the annuitization formula) + for col in expected_result.columns: + pd.testing.assert_series_equal( + result[col], + expected_result[col], + check_dtype=False, # Allow float vs int differences + check_names=False, # Ignore index names + ) + + +def test_translate_expansion_costs_to_links_empty(csv_str_to_df): + """Test that empty flow path expansion costs result in empty DataFrame.""" + # Create empty DataFrame + flow_path_expansion_costs_csv = """ + flow_path,additional_network_capacity_mw,2025_26_$/mw + """ + flow_path_expansion_costs = csv_str_to_df(flow_path_expansion_costs_csv) + + existing_links_csv = """ + name, carrier, bus0, bus1, p_nom + PathA-PathB_existing, AC, NodeA, NodeB, 1000 + """ + existing_links_df = csv_str_to_df(existing_links_csv) + + result = _translate_expansion_costs_to_links( + flow_path_expansion_costs, + existing_links_df, + [2026], + "fy", + 0.07, + 30, + id_column="flow_path", + match_column="name", + ) + + # The result should be an empty DataFrame + assert result.empty + + +def test_translate_expansion_costs_to_links_na_values(csv_str_to_df): + """Test that na flow path expansion costs result in empty DataFrame.""" + # Create empty DataFrame + flow_path_expansion_costs_csv = """ + flow_path, additional_network_capacity_mw, 2025_26_$/mw + A, 100.0, , + """ + flow_path_expansion_costs = csv_str_to_df(flow_path_expansion_costs_csv) + + existing_links_csv = """ + name, carrier, bus0, bus1, p_nom + PathA-PathB_existing, AC, NodeA, NodeB, 1000 + """ + existing_links_df = csv_str_to_df(existing_links_csv) + + result = _translate_expansion_costs_to_links( + flow_path_expansion_costs, + existing_links_df, + [2026], + "fy", + 0.07, + 30, + id_column="flow_path", + match_column="name", + ) + + # The result should be an empty DataFrame + assert result.empty + + +def test_translate_expansion_costs_to_links_no_matching_years(csv_str_to_df): + """Test when none of the expansion costs match the investment periods.""" + # Create sample data for testing + flow_path_expansion_costs_csv = """ + flow_path, additional_network_capacity_mw, 2025_26_$/mw + PathA-PathB, 500, 1000 + """ + flow_path_expansion_costs = csv_str_to_df(flow_path_expansion_costs_csv) + + existing_links_csv = """ + name, carrier, bus0, bus1, p_nom + PathA-PathB_existing, AC, NodeA, NodeB, 1000 + """ + existing_links_df = csv_str_to_df(existing_links_csv) + + # Investment periods don't include 2026 + investment_periods = [2027, 2028] + year_type = "fy" + wacc = 0.07 + asset_lifetime = 30 + + # Call the function with updated parameters + result = _translate_expansion_costs_to_links( + flow_path_expansion_costs, + existing_links_df, + investment_periods, + year_type, + wacc, + asset_lifetime, + id_column="flow_path", + match_column="name", + ) + + # The result should be an empty DataFrame since no years match + assert result.empty + + +def test_translate_flow_paths_to_links_with_expansion(csv_str_to_df): + """Test that flow paths are translated to links with expansion.""" + # Create sample input data + flow_paths_csv = """ + flow_path, carrier, node_from, 
node_to, forward_direction_mw_summer_typical, reverse_direction_mw_summer_typical + PathA-PathB, AC, NodeA, NodeB, 1000, 1000 + PathB-PathC, AC, NodeB, NodeC, 2000, 2000 + """ + + flow_path_expansion_costs_csv = """ + flow_path, additional_network_capacity_mw, 2025_26_$/mw, 2026_27_$/mw + PathA-PathB, 500, 1000, 1200 + PathB-PathC, 800, 1500, 1800 + """ + + ispypsa_tables = { + "flow_paths": csv_str_to_df(flow_paths_csv), + "flow_path_expansion_costs": csv_str_to_df(flow_path_expansion_costs_csv), + } + + # Mock config with expansion enabled + class MockTemporalConfig: + class MockCapacityExpansion: + investment_periods = [2026, 2027] + + year_type = "fy" + capacity_expansion = MockCapacityExpansion() + + class MockNetworkConfig: + annuitisation_lifetime = 30 + transmission_expansion = True # This is the key parameter needed + + class MockConfig: + temporal = MockTemporalConfig() + network = MockNetworkConfig() + wacc = 0.07 + + config = MockConfig() + + # Call the function + result = _translate_flow_paths_to_links(ispypsa_tables, config) + + # Check the result is of the expected length + assert len(result) == 6 + + # Check that the result includes both existing and expansion links + assert any("_existing" in name for name in result["name"]) + assert any("_exp_" in name for name in result["name"]) + + +def test_translate_flow_paths_to_links_without_expansion(csv_str_to_df): + """Test that flow paths are translated to links without expansion.""" + # Create sample input data + flow_paths_csv = """ + flow_path, carrier, node_from, node_to, forward_direction_mw_summer_typical, reverse_direction_mw_summer_typical + PathA-PathB, AC, NodeA, NodeB, 1000, 1000 + PathB-PathC, AC, NodeB, NodeC, 2000, 2000 + """ + + flow_path_expansion_costs_csv = """ + flow_path, additional_network_capacity_mw, 2025_26_$/mw, 2026_27_$/mw + PathA-PathB, 500, 1000, 1200 + PathB-PathC, 800, 1500, 1800 + """ + + ispypsa_tables = { + "flow_paths": csv_str_to_df(flow_paths_csv), + "flow_path_expansion_costs": csv_str_to_df(flow_path_expansion_costs_csv), + } + + # Mock config with expansion disabled + class MockTemporalConfig: + class MockCapacityExpansion: + investment_periods = [2026, 2027] + + year_type = "fy" + capacity_expansion = MockCapacityExpansion() + + class MockNetworkConfig: + annuitisation_lifetime = 30 + transmission_expansion = False # This is the key parameter needed + + class MockConfig: + temporal = MockTemporalConfig() + network = MockNetworkConfig() + wacc = 0.07 + + config = MockConfig() + + # Call the function + result = _translate_flow_paths_to_links(ispypsa_tables, config) + + # Expected result - only existing links, no expansion links + expected_result_csv = """ + name, bus0, bus1, p_nom, p_min_pu, capital_cost, p_nom_extendable, carrier + PathA-PathB_existing, NodeA, NodeB, 1000, -1.0, , False, AC + PathB-PathC_existing, NodeB, NodeC, 2000, -1.0, , False, AC + """ + expected_result = csv_str_to_df(expected_result_csv) + expected_result["capital_cost"] = pd.to_numeric( + expected_result["capital_cost"], errors="coerce" + ) + + # Sort both dataframes for comparison + result = result.sort_values("name").reset_index(drop=True) + expected_result = expected_result.sort_values("name").reset_index(drop=True) + + # Assert the results match expectations + for col in expected_result.columns: + pd.testing.assert_series_equal( + result[col], + expected_result[col], + check_dtype=False, + check_names=False, + ) + + +def test_translate_expansion_costs_to_links_calendar_year_error(csv_str_to_df): + """Test that 
calendar year type raises a NotImplementedError.""" + # Create sample data + flow_path_expansion_costs_csv = """ + flow_path, additional_network_capacity_mw, 2025_26_$/mw + PathA-PathB, 500, 1000 + """ + flow_path_expansion_costs = csv_str_to_df(flow_path_expansion_costs_csv) + + existing_links_csv = """ + name, carrier, bus0, bus1, p_nom + PathA-PathB_existing, AC, NodeA, NodeB, 1000 + """ + existing_links_df = csv_str_to_df(existing_links_csv) + + investment_periods = [2026] + year_type = "calendar" # This should trigger the error + wacc = 0.07 + asset_lifetime = 30 + + # Check that the correct error is raised + with pytest.raises( + NotImplementedError, + match="Calendar years are not implemented yet for transmission costs", + ): + _translate_expansion_costs_to_links( + flow_path_expansion_costs, + existing_links_df, + investment_periods, + year_type, + wacc, + asset_lifetime, + id_column="flow_path", + match_column="name", + ) + + +def test_translate_expansion_costs_to_links_invalid_year_type(csv_str_to_df): + """Test that an invalid year type raises a ValueError.""" + # Create sample data + flow_path_expansion_costs_csv = """ + flow_path, additional_network_capacity_mw, 2025_26_$/mw + PathA-PathB, 500, 1000 + """ + flow_path_expansion_costs = csv_str_to_df(flow_path_expansion_costs_csv) + + existing_links_csv = """ + name, carrier, bus0, bus1, p_nom + PathA-PathB_existing, AC, NodeA, NodeB, 1000 + """ + existing_links_df = csv_str_to_df(existing_links_csv) + + investment_periods = [2026] + year_type = "invalid_year_type" # This should trigger the error + wacc = 0.07 + asset_lifetime = 30 + + # Check that the correct error is raised + with pytest.raises(ValueError, match="Unknown year_type"): + _translate_expansion_costs_to_links( + flow_path_expansion_costs, + existing_links_df, + investment_periods, + year_type, + wacc, + asset_lifetime, + id_column="flow_path", + match_column="name", + ) diff --git a/tests/test_translator/test_process_manual_custom_constraints.py b/tests/test_translator/test_process_manual_custom_constraints.py new file mode 100644 index 0000000..eeef584 --- /dev/null +++ b/tests/test_translator/test_process_manual_custom_constraints.py @@ -0,0 +1,828 @@ +import pandas as pd +import pytest + +from ispypsa.translator.custom_constraints import _process_manual_custom_constraints + + +def test_process_manual_custom_constraints_basic(csv_str_to_df): + """Test basic processing of manual custom constraints without REZ expansion.""" + + # Input: custom_constraints_lhs + custom_constraints_lhs_csv = """ + constraint_id, term_type, term_id, coefficient + CONS1, generator_capacity, GEN1, 1.0 + CONS1, generator_capacity, GEN2, 1.0 + CONS2, link_flow, PATH1, 2.0 + """ + + # Input: custom_constraints_rhs + custom_constraints_rhs_csv = """ + constraint_id, rhs, constraint_type + CONS1, 5000, <= + CONS2, 3000, >= + """ + + # Input: links + links_csv = """ + isp_name, name, carrier, bus0, bus1, p_nom, p_nom_extendable + PATH1, PATH1_existing, AC, BUS1, BUS2, 1000, False + PATH1, PATH1_exp_2025, AC, BUS1, BUS2, 0, True + """ + + # Create input data + ispypsa_tables = { + "custom_constraints_lhs": csv_str_to_df(custom_constraints_lhs_csv), + "custom_constraints_rhs": csv_str_to_df(custom_constraints_rhs_csv), + } + links = csv_str_to_df(links_csv) + + # Mock configuration with REZ expansion disabled + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # Call the function under test + lhs, 
rhs, generators = _process_manual_custom_constraints( + config, ispypsa_tables, links + ) + + # Expected RHS output + expected_rhs_csv = """ + constraint_name, rhs, constraint_type + CONS1, 5000, <= + CONS2, 3000, >= + """ + expected_rhs = csv_str_to_df(expected_rhs_csv) + + # Expected LHS output + expected_lhs_csv = """ + constraint_name, variable_name, coefficient, component, attribute + CONS1, GEN1, 1.0, Generator, p_nom + CONS1, GEN2, 1.0, Generator, p_nom + CONS2, PATH1_existing, 2.0, Link, p + CONS2, PATH1_exp_2025, 2.0, Link, p + """ + expected_lhs = csv_str_to_df(expected_lhs_csv) + + # Sort DataFrames for comparison + lhs_sorted = lhs.sort_values(["constraint_name", "variable_name"]).reset_index( + drop=True + ) + expected_lhs_sorted = expected_lhs.sort_values( + ["constraint_name", "variable_name"] + ).reset_index(drop=True) + + rhs_sorted = rhs.sort_values("constraint_name").reset_index(drop=True) + expected_rhs_sorted = expected_rhs.sort_values("constraint_name").reset_index( + drop=True + ) + + # Assert results + pd.testing.assert_frame_equal(lhs_sorted, expected_lhs_sorted) + pd.testing.assert_frame_equal(rhs_sorted, expected_rhs_sorted) + + # When REZ expansion is disabled, generators should be None + assert generators is None + + +def test_process_manual_custom_constraints_empty_tables(csv_str_to_df): + """Test processing when custom constraint tables are empty.""" + + # Create empty input data + ispypsa_tables = {} + links = pd.DataFrame() + + # Mock configuration + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # Call the function under test + lhs, rhs, generators = _process_manual_custom_constraints( + config, ispypsa_tables, links + ) + + # All outputs should be empty DataFrames + assert lhs.empty + assert rhs.empty + assert generators is None + + +def test_process_manual_custom_constraints_missing_rhs(csv_str_to_df): + """Test that ValueError is raised when LHS exists but RHS is missing.""" + + # Input: only custom_constraints_lhs (missing RHS) + custom_constraints_lhs_csv = """ + constraint_id, term_type, term_id, coefficient + CONS1, generator_capacity, GEN1, 1.0 + """ + + ispypsa_tables = { + "custom_constraints_lhs": csv_str_to_df(custom_constraints_lhs_csv), + } + links = pd.DataFrame() + + # Mock configuration + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # Should raise ValueError for incomplete tables + with pytest.raises( + ValueError, match="Incomplete manual custom constraints tables provided" + ): + _process_manual_custom_constraints(config, ispypsa_tables, links) + + +def test_process_manual_custom_constraints_missing_lhs(csv_str_to_df): + """Test that ValueError is raised when RHS exists but LHS is missing.""" + + # Input: only custom_constraints_rhs (missing LHS) + custom_constraints_rhs_csv = """ + constraint_id, rhs, constraint_type + CONS1, 5000, <= + """ + + ispypsa_tables = { + "custom_constraints_rhs": csv_str_to_df(custom_constraints_rhs_csv), + } + links = pd.DataFrame() + + # Mock configuration + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # Should raise ValueError for incomplete tables + with pytest.raises( + ValueError, match="Incomplete manual custom constraints tables provided" + ): + _process_manual_custom_constraints(config, ispypsa_tables, 
links) + + +def test_process_manual_custom_constraints_with_rez_expansion(csv_str_to_df): + """Test processing with REZ expansion enabled and custom generators.""" + + # Input: custom_constraints_lhs + custom_constraints_lhs_csv = """ + constraint_id, term_type, term_id, coefficient + REZ_NSW, generator_capacity, GEN1, 1.0 + REZ_VIC, generator_capacity, GEN2, 1.0 + """ + + # Input: custom_constraints_rhs + custom_constraints_rhs_csv = """ + constraint_id, rhs, constraint_type + REZ_NSW, 10000, <= + REZ_VIC, 8000, <= + """ + + # Input: REZ transmission expansion costs + rez_transmission_expansion_costs_csv = """ + rez_constraint_id, 2024_25_$/mw, 2029_30_$/mw, 2034_35_$/mw + REZ_NSW, 100, 110, 120 + REZ_VIC, 150, 160, 170 + REZ_QLD, 200, 210, 220 + """ + + # Input: links + links_csv = """ + isp_name, name, carrier, bus0, bus1, p_nom, p_nom_extendable + PATH1, PATH1_existing, AC, BUS1, BUS2, 1000, False + """ + + # Create input data + ispypsa_tables = { + "custom_constraints_lhs": csv_str_to_df(custom_constraints_lhs_csv), + "custom_constraints_rhs": csv_str_to_df(custom_constraints_rhs_csv), + "rez_transmission_expansion_costs": csv_str_to_df( + rez_transmission_expansion_costs_csv + ), + } + links = csv_str_to_df(links_csv) + + # Mock configuration with REZ expansion enabled + class MockNetworkConfig: + rez_transmission_expansion = True + annuitisation_lifetime = 30 + + class MockTemporalConfig: + class MockCapacityExpansion: + investment_periods = [2025, 2030, 2035] + + capacity_expansion = MockCapacityExpansion() + year_type = "fy" + + class MockConfig: + network = MockNetworkConfig() + temporal = MockTemporalConfig() + wacc = 0.07 + + config = MockConfig() + + # Call the function under test + lhs, rhs, generators = _process_manual_custom_constraints( + config, ispypsa_tables, links + ) + + # Expected RHS output + expected_rhs_csv = """ + constraint_name, rhs, constraint_type + REZ_NSW, 10000, <= + REZ_VIC, 8000, <= + """ + expected_rhs = csv_str_to_df(expected_rhs_csv) + + # Assert RHS is correct + rhs_sorted = rhs.sort_values("constraint_name").reset_index(drop=True) + expected_rhs_sorted = expected_rhs.sort_values("constraint_name").reset_index( + drop=True + ) + pd.testing.assert_frame_equal(rhs_sorted, expected_rhs_sorted) + + # Expected generators output + expected_generators_csv = """ + name, isp_name, bus, p_nom, p_nom_extendable, build_year, lifetime, capital_cost + REZ_NSW_exp_2025, REZ_NSW, bus_for_custom_constraint_gens, 0.0, True, 2025, inf, 9.43396 + REZ_NSW_exp_2030, REZ_NSW, bus_for_custom_constraint_gens, 0.0, True, 2030, inf, 10.37736 + REZ_NSW_exp_2035, REZ_NSW, bus_for_custom_constraint_gens, 0.0, True, 2035, inf, 11.32075 + REZ_VIC_exp_2025, REZ_VIC, bus_for_custom_constraint_gens, 0.0, True, 2025, inf, 14.15094 + REZ_VIC_exp_2030, REZ_VIC, bus_for_custom_constraint_gens, 0.0, True, 2030, inf, 15.09434 + REZ_VIC_exp_2035, REZ_VIC, bus_for_custom_constraint_gens, 0.0, True, 2035, inf, 16.03774 + """ + expected_generators = csv_str_to_df(expected_generators_csv) + + # Compare generators (excluding capital_cost which is calculated) + generators_sorted = generators.sort_values("name").reset_index(drop=True) + expected_generators_sorted = expected_generators.sort_values("name").reset_index( + drop=True + ) + + # Compare all columns except capital_cost + cols_to_compare = [ + "name", + "isp_name", + "bus", + "p_nom", + "p_nom_extendable", + "build_year", + ] + pd.testing.assert_frame_equal( + generators_sorted[cols_to_compare], expected_generators_sorted[cols_to_compare] + 
) + + # Check lifetime is infinity + assert all(generators["lifetime"] == float("inf")) + + # Check capital costs are positive + assert all(generators["capital_cost"] > 0) + + # Expected LHS output (including both original and generator constraints) + expected_lhs_csv = """ + constraint_name, variable_name, coefficient, component, attribute + REZ_NSW, GEN1, 1.0, Generator, p_nom + REZ_VIC, GEN2, 1.0, Generator, p_nom + REZ_NSW, REZ_NSW_exp_2025, -1.0, Generator, p_nom + REZ_NSW, REZ_NSW_exp_2030, -1.0, Generator, p_nom + REZ_NSW, REZ_NSW_exp_2035, -1.0, Generator, p_nom + REZ_VIC, REZ_VIC_exp_2025, -1.0, Generator, p_nom + REZ_VIC, REZ_VIC_exp_2030, -1.0, Generator, p_nom + REZ_VIC, REZ_VIC_exp_2035, -1.0, Generator, p_nom + """ + expected_lhs = csv_str_to_df(expected_lhs_csv) + + # Compare LHS + lhs_sorted = lhs.sort_values( + ["constraint_name", "coefficient", "variable_name"] + ).reset_index(drop=True) + expected_lhs_sorted = expected_lhs.sort_values( + ["constraint_name", "coefficient", "variable_name"] + ).reset_index(drop=True) + pd.testing.assert_frame_equal(lhs_sorted, expected_lhs_sorted) + + +def test_process_manual_custom_constraints_mixed_term_types(csv_str_to_df): + """Test processing with mixed term types (generator capacity, link flow, generator output).""" + + # Input: custom_constraints_lhs with mixed term types + custom_constraints_lhs_csv = """ + constraint_id, term_type, term_id, coefficient + CONS1, generator_capacity, GEN1, 1.0 + CONS1, link_flow, PATH1, -0.5 + CONS1, generator_output, GEN4, 2.0 + CONS2, generator_capacity, GEN2, 3.0 + CONS2, generator_capacity, GEN3, -1.5 + """ + + # Input: custom_constraints_rhs + custom_constraints_rhs_csv = """ + constraint_id, rhs, constraint_type + CONS1, 15000, <= + CONS2, 20000, <= + """ + + # Input: links + links_csv = """ + isp_name, name, carrier, bus0, bus1, p_nom, p_nom_extendable + PATH1, PATH1_existing, AC, BUS1, BUS2, 1000, False + PATH1, PATH1_exp_2030, AC, BUS1, BUS2, 0, True + PATH2, PATH2_existing, AC, BUS3, BUS4, 2000, False + """ + + # Create input data + ispypsa_tables = { + "custom_constraints_lhs": csv_str_to_df(custom_constraints_lhs_csv), + "custom_constraints_rhs": csv_str_to_df(custom_constraints_rhs_csv), + } + links = csv_str_to_df(links_csv) + + # Mock configuration + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # Call the function under test + lhs, rhs, generators = _process_manual_custom_constraints( + config, ispypsa_tables, links + ) + + # Expected RHS output + expected_rhs_csv = """ + constraint_name, rhs, constraint_type + CONS1, 15000, <= + CONS2, 20000, <= + """ + expected_rhs = csv_str_to_df(expected_rhs_csv) + + # Assert RHS is correct + rhs_sorted = rhs.sort_values("constraint_name").reset_index(drop=True) + expected_rhs_sorted = expected_rhs.sort_values("constraint_name").reset_index( + drop=True + ) + pd.testing.assert_frame_equal(rhs_sorted, expected_rhs_sorted) + + # Expected LHS output with mixed term types + expected_lhs_csv = """ + constraint_name, variable_name, coefficient, component, attribute + CONS1, GEN1, 1.0, Generator, p_nom + CONS1, GEN4, 2.0, Generator, p + CONS1, PATH1_existing, -0.5, Link, p + CONS1, PATH1_exp_2030, -0.5, Link, p + CONS2, GEN2, 3.0, Generator, p_nom + CONS2, GEN3, -1.5, Generator, p_nom + """ + expected_lhs = csv_str_to_df(expected_lhs_csv) + + # Compare LHS + lhs_sorted = lhs.sort_values(["constraint_name", "variable_name"]).reset_index( + drop=True + ) + 
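+    # The expected frame is sorted the same way as the result so the comparison
+    # does not depend on the row order produced by the translator.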
expected_lhs_sorted = expected_lhs.sort_values( + ["constraint_name", "variable_name"] + ).reset_index(drop=True) + pd.testing.assert_frame_equal(lhs_sorted, expected_lhs_sorted) + + # Generators should be None when REZ expansion is disabled + assert generators is None + + +def test_process_manual_custom_constraints_no_matching_links(csv_str_to_df): + """Test that ValueError is raised when link flow constraints reference non-existent links.""" + + # Input: custom_constraints_lhs with link flow term + custom_constraints_lhs_csv = """ + constraint_id, term_type, term_id, coefficient + CONS1, generator_capacity, GEN1, 1.0 + CONS1, link_flow, NONEXISTENT, 2.0 + """ + + # Input: custom_constraints_rhs + custom_constraints_rhs_csv = """ + constraint_id, rhs, constraint_type + CONS1, 5000, <= + """ + + # Input: links (doesn't contain NONEXISTENT) + links_csv = """ + isp_name, name, carrier, bus0, bus1, p_nom, p_nom_extendable + PATH1, PATH1_existing, AC, BUS1, BUS2, 1000, False + """ + + # Create input data + ispypsa_tables = { + "custom_constraints_lhs": csv_str_to_df(custom_constraints_lhs_csv), + "custom_constraints_rhs": csv_str_to_df(custom_constraints_rhs_csv), + } + links = csv_str_to_df(links_csv) + + # Mock configuration + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # Should raise ValueError for non-existent link + with pytest.raises( + ValueError, + match="The following link_flow terms reference links that don't exist: \\['NONEXISTENT'\\]", + ): + _process_manual_custom_constraints(config, ispypsa_tables, links) + + +def test_process_manual_custom_constraints_multiple_no_matching_links(csv_str_to_df): + """Test that ValueError is raised with multiple non-existent links in error message.""" + + # Input: custom_constraints_lhs with multiple link flow terms referencing non-existent links + custom_constraints_lhs_csv = """ + constraint_id, term_type, term_id, coefficient + CONS1, generator_capacity, GEN1, 1.0 + CONS1, link_flow, NONEXISTENT1, 2.0 + CONS2, link_flow, NONEXISTENT2, 1.5 + CONS2, link_flow, PATH1, -1.0 + """ + + # Input: custom_constraints_rhs + custom_constraints_rhs_csv = """ + constraint_id, rhs, constraint_type + CONS1, 5000, <= + CONS2, 3000, <= + """ + + # Input: links (contains PATH1 but not NONEXISTENT1 or NONEXISTENT2) + links_csv = """ + isp_name, name, carrier, bus0, bus1, p_nom, p_nom_extendable + PATH1, PATH1_existing, AC, BUS1, BUS2, 1000, False + """ + + # Create input data + ispypsa_tables = { + "custom_constraints_lhs": csv_str_to_df(custom_constraints_lhs_csv), + "custom_constraints_rhs": csv_str_to_df(custom_constraints_rhs_csv), + } + links = csv_str_to_df(links_csv) + + # Mock configuration + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # Should raise ValueError for non-existent links + with pytest.raises( + ValueError, + match="The following link_flow terms reference links that don't exist: \\['NONEXISTENT1', 'NONEXISTENT2'\\]", + ): + _process_manual_custom_constraints(config, ispypsa_tables, links) + + +def test_process_manual_custom_constraints_empty_lhs_table(csv_str_to_df): + """Test processing when LHS table exists but is empty.""" + + # Input: empty custom_constraints_lhs + custom_constraints_lhs_csv = """ + constraint_id, term_type, term_id, coefficient + """ + + # Input: empty custom_constraints_rhs + custom_constraints_rhs_csv = """ + 
constraint_id, rhs, constraint_type + """ + + ispypsa_tables = { + "custom_constraints_lhs": csv_str_to_df(custom_constraints_lhs_csv), + "custom_constraints_rhs": csv_str_to_df(custom_constraints_rhs_csv), + } + links = pd.DataFrame() + + # Mock configuration + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # Call the function under test + lhs, rhs, generators = _process_manual_custom_constraints( + config, ispypsa_tables, links + ) + + # All outputs should be empty + assert lhs.empty + assert rhs.empty + assert generators is None + + +def test_process_manual_custom_constraints_none_tables(csv_str_to_df): + """Test processing when tables contain None values.""" + + ispypsa_tables = { + "custom_constraints_lhs": None, + "custom_constraints_rhs": None, + } + links = pd.DataFrame() + + # Mock configuration + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # Call the function under test + lhs, rhs, generators = _process_manual_custom_constraints( + config, ispypsa_tables, links + ) + + # All outputs should be empty + assert lhs.empty + assert rhs.empty + assert generators is None + + +def test_process_manual_custom_constraints_empty_links_with_link_flow(csv_str_to_df): + """Test that ValueError is raised when links is empty but link_flow terms exist.""" + + # Input: custom_constraints_lhs with link_flow term + custom_constraints_lhs_csv = """ + constraint_id, term_type, term_id, coefficient + CONS1, generator_capacity, GEN1, 1.0 + CONS1, link_flow, PATH1, 2.0 + """ + + # Input: custom_constraints_rhs + custom_constraints_rhs_csv = """ + constraint_id, rhs, constraint_type + CONS1, 5000, <= + """ + + # Create input data with empty links DataFrame + ispypsa_tables = { + "custom_constraints_lhs": csv_str_to_df(custom_constraints_lhs_csv), + "custom_constraints_rhs": csv_str_to_df(custom_constraints_rhs_csv), + } + links = pd.DataFrame() # Empty DataFrame + + # Mock configuration + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # Should raise ValueError because PATH1 doesn't exist in empty links + with pytest.raises( + ValueError, + match="The following link_flow terms reference links that don't exist: \\['PATH1'\\]", + ): + _process_manual_custom_constraints(config, ispypsa_tables, links) + + +def test_process_manual_custom_constraints_empty_links_without_link_flow(csv_str_to_df): + """Test processing with empty links when no link_flow terms exist.""" + + # Input: custom_constraints_lhs without link_flow terms + custom_constraints_lhs_csv = """ + constraint_id, term_type, term_id, coefficient + CONS1, generator_capacity, GEN1, 1.0 + CONS1, generator_capacity, GEN2, 1.5 + """ + + # Input: custom_constraints_rhs + custom_constraints_rhs_csv = """ + constraint_id, rhs, constraint_type + CONS1, 5000, <= + """ + + # Create input data with empty links DataFrame + ispypsa_tables = { + "custom_constraints_lhs": csv_str_to_df(custom_constraints_lhs_csv), + "custom_constraints_rhs": csv_str_to_df(custom_constraints_rhs_csv), + } + links = pd.DataFrame() # Empty DataFrame + + # Mock configuration + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # Call the function under test + lhs, rhs, generators = 
_process_manual_custom_constraints( + config, ispypsa_tables, links + ) + + # Expected RHS output + expected_rhs_csv = """ + constraint_name, rhs, constraint_type + CONS1, 5000, <= + """ + expected_rhs = csv_str_to_df(expected_rhs_csv) + + # Expected LHS output (only generator constraints) + expected_lhs_csv = """ + constraint_name, variable_name, coefficient, component, attribute + CONS1, GEN1, 1.0, Generator, p_nom + CONS1, GEN2, 1.5, Generator, p_nom + """ + expected_lhs = csv_str_to_df(expected_lhs_csv) + + # Compare DataFrames + pd.testing.assert_frame_equal( + rhs.sort_values("constraint_name").reset_index(drop=True), + expected_rhs.sort_values("constraint_name").reset_index(drop=True), + ) + + pd.testing.assert_frame_equal( + lhs.sort_values(["constraint_name", "variable_name"]).reset_index(drop=True), + expected_lhs.sort_values(["constraint_name", "variable_name"]).reset_index( + drop=True + ), + ) + + assert generators is None + + +def test_process_manual_custom_constraints_none_links_with_link_flow(csv_str_to_df): + """Test that ValueError is raised when links is None but link_flow terms exist.""" + + # Input: custom_constraints_lhs with link_flow term + custom_constraints_lhs_csv = """ + constraint_id, term_type, term_id, coefficient + CONS1, generator_capacity, GEN1, 1.0 + CONS1, link_flow, PATH1, 2.0 + """ + + # Input: custom_constraints_rhs + custom_constraints_rhs_csv = """ + constraint_id, rhs, constraint_type + CONS1, 5000, <= + """ + + # Create input data with None links + ispypsa_tables = { + "custom_constraints_lhs": csv_str_to_df(custom_constraints_lhs_csv), + "custom_constraints_rhs": csv_str_to_df(custom_constraints_rhs_csv), + } + links = None # None instead of DataFrame + + # Mock configuration + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # Should raise ValueError because PATH1 doesn't exist (links is None) + with pytest.raises( + ValueError, + match="The following link_flow terms reference links that don't exist: \\['PATH1'\\]", + ): + _process_manual_custom_constraints(config, ispypsa_tables, links) + + +def test_process_manual_custom_constraints_none_links_without_link_flow(csv_str_to_df): + """Test processing with None links when no link_flow terms exist.""" + + # Input: custom_constraints_lhs without link_flow terms + custom_constraints_lhs_csv = """ + constraint_id, term_type, term_id, coefficient + CONS1, generator_capacity, GEN1, 1.0 + CONS1, generator_capacity, GEN2, 1.5 + """ + + # Input: custom_constraints_rhs + custom_constraints_rhs_csv = """ + constraint_id, rhs, constraint_type + CONS1, 5000, <= + """ + + # Create input data with None links + ispypsa_tables = { + "custom_constraints_lhs": csv_str_to_df(custom_constraints_lhs_csv), + "custom_constraints_rhs": csv_str_to_df(custom_constraints_rhs_csv), + } + links = None # None instead of DataFrame + + # Mock configuration + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # When there are no link_flow terms, links is not accessed, so it should work fine + lhs, rhs, generators = _process_manual_custom_constraints( + config, ispypsa_tables, links + ) + + # Expected RHS output + expected_rhs_csv = """ + constraint_name, rhs, constraint_type + CONS1, 5000, <= + """ + expected_rhs = csv_str_to_df(expected_rhs_csv) + + # Expected LHS output (only generator constraints) + expected_lhs_csv = """ + 
constraint_name, variable_name, coefficient, component, attribute + CONS1, GEN1, 1.0, Generator, p_nom + CONS1, GEN2, 1.5, Generator, p_nom + """ + expected_lhs = csv_str_to_df(expected_lhs_csv) + + # Compare DataFrames + pd.testing.assert_frame_equal( + rhs.sort_values("constraint_name").reset_index(drop=True), + expected_rhs.sort_values("constraint_name").reset_index(drop=True), + ) + + pd.testing.assert_frame_equal( + lhs.sort_values(["constraint_name", "variable_name"]).reset_index(drop=True), + expected_lhs.sort_values(["constraint_name", "variable_name"]).reset_index( + drop=True + ), + ) + + assert generators is None + + +def test_process_manual_custom_constraints_links_missing_columns(csv_str_to_df): + """Test that appropriate error is raised when links is missing required columns.""" + + # Input: custom_constraints_lhs with link_flow term + custom_constraints_lhs_csv = """ + constraint_id, term_type, term_id, coefficient + CONS1, generator_capacity, GEN1, 1.0 + CONS1, link_flow, PATH1, 2.0 + """ + + # Input: custom_constraints_rhs + custom_constraints_rhs_csv = """ + constraint_id, rhs, constraint_type + CONS1, 5000, <= + """ + + # Input: links without required columns (missing isp_name) + links_csv = """ + wrong_column, name, carrier, bus0, bus1, p_nom, p_nom_extendable + PATH1, PATH1_existing, AC, BUS1, BUS2, 1000, False + """ + + # Create input data + ispypsa_tables = { + "custom_constraints_lhs": csv_str_to_df(custom_constraints_lhs_csv), + "custom_constraints_rhs": csv_str_to_df(custom_constraints_rhs_csv), + } + links = csv_str_to_df(links_csv) + + # Mock configuration + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # Should raise KeyError when trying to access "isp_name" column + with pytest.raises(KeyError, match="isp_name"): + _process_manual_custom_constraints(config, ispypsa_tables, links) diff --git a/tests/test_translator/test_sample_ispypsa_tables_fixture.py b/tests/test_translator/test_sample_ispypsa_tables_fixture.py new file mode 100644 index 0000000..20a4d3c --- /dev/null +++ b/tests/test_translator/test_sample_ispypsa_tables_fixture.py @@ -0,0 +1,80 @@ +import pandas as pd +import pytest + +from ispypsa.translator import create_pypsa_friendly_inputs + + +def test_sample_ispypsa_tables_fixture(sample_ispypsa_tables): + """Test that the sample_ispypsa_tables fixture contains all required tables.""" + # Check that all required tables are present + expected_tables = [ + "sub_regions", + "nem_regions", + "renewable_energy_zones", + "flow_paths", + "ecaa_generators", + "custom_constraints_lhs", + "custom_constraints_rhs", + ] + + for table_name in expected_tables: + assert table_name in sample_ispypsa_tables + assert isinstance(sample_ispypsa_tables[table_name], pd.DataFrame) + assert not sample_ispypsa_tables[table_name].empty + + # Check specific table contents + assert len(sample_ispypsa_tables["sub_regions"]) == 2 # CNSW and NNSW + assert len(sample_ispypsa_tables["renewable_energy_zones"]) == 2 # 2 REZs + assert ( + len(sample_ispypsa_tables["ecaa_generators"]) == 6 + ) # 2 coal + 4 REZ generators + + +def test_create_pypsa_friendly_inputs_with_fixture( + sample_ispypsa_tables, sample_model_config +): + """Test that create_pypsa_friendly_inputs runs successfully with the fixture data.""" + # Run create_pypsa_friendly_inputs + result = create_pypsa_friendly_inputs(sample_model_config, sample_ispypsa_tables) + + # Check that all expected outputs are present + 
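+    # custom_constraints_generators is deliberately not listed here; it is only
+    # produced when rez_transmission_expansion is enabled and is checked separately below.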
expected_outputs = [ + "snapshots", + "investment_period_weights", + "buses", + "links", + "generators", + "custom_constraints_lhs", + "custom_constraints_rhs", + ] + + for output in expected_outputs: + assert output in result + assert isinstance(result[output], pd.DataFrame) + + # custom_constraints_generators is only created when rez_transmission_expansion is True + if sample_model_config.network.rez_transmission_expansion: + assert "custom_constraints_generators" in result + assert isinstance(result["custom_constraints_generators"], pd.DataFrame) + + # Check specific results + # Buses should include sub-regions and REZs + assert len(result["buses"]) == 4 # 2 sub-regions + 2 REZs + + # Generators should include ECAA generators + unserved energy generators + assert len(result["generators"]) >= 8 # 6 ECAA + 2 unserved energy + + # Links should include flow paths and REZ connections + assert len(result["links"]) >= 3 # 1 flow path + 2 REZ connections + + # Custom constraints should be translated + assert not result["custom_constraints_rhs"].empty + assert not result["custom_constraints_lhs"].empty + + # Snapshots should be created based on config + assert not result["snapshots"].empty + assert "investment_periods" in result["snapshots"].columns + + # Investment period weights should be created + assert not result["investment_period_weights"].empty + assert len(result["investment_period_weights"]) == 2 # 2 investment periods diff --git a/tests/test_translator/test_translate_custom_constraints.py b/tests/test_translator/test_translate_custom_constraints.py index 170b314..3409023 100644 --- a/tests/test_translator/test_translate_custom_constraints.py +++ b/tests/test_translator/test_translate_custom_constraints.py @@ -1,90 +1,485 @@ -import numpy as np import pandas as pd +import pytest from ispypsa.translator.custom_constraints import ( - _translate_custom_constraint_lhs, - _translate_custom_constraint_rhs, - _translate_custom_constraints_generators, + _translate_custom_constraints, + _validate_lhs_rhs_constraints, ) -def test_translate_custom_constraints_generators(): - ispypsa_custom_constraint_gens = pd.DataFrame( - { - "variable_name": ["X", "Y"], - "constraint_id": ["A", "B"], - "indicative_transmission_expansion_cost_$/mw": [0.0, np.nan], - } - ) - expected_pypsa_custom_constraint_gens = pd.DataFrame( - { - "name": ["X", "Y"], - "constraint_name": ["A", "B"], - "capital_cost": [0.0, np.nan], - "bus": "bus_for_custom_constraint_gens", - "p_nom": [0.0, 0.0], - "p_nom_extendable": [True, False], - } - ) - pypsa_custom_constraint_gens = _translate_custom_constraints_generators( - [ispypsa_custom_constraint_gens], - expansion_on=True, - wacc=5.0, - asset_lifetime=10, - ) - pd.testing.assert_frame_equal( - expected_pypsa_custom_constraint_gens, pypsa_custom_constraint_gens - ) - - -def test_translate_custom_constraints_rhs(): - ispypsa_custom_constraint_rhs = pd.DataFrame( - { - "constraint_id": ["A", "B"], - "summer_typical": [10.0, 20.0], - } - ) - expected_pypsa_custom_constraint_rhs = pd.DataFrame( - { - "constraint_name": ["A", "B"], - "rhs": [10.0, 20.0], - } - ) - pypsa_custom_constraint_rhs = _translate_custom_constraint_rhs( - [ispypsa_custom_constraint_rhs] - ) - pd.testing.assert_frame_equal( - expected_pypsa_custom_constraint_rhs, pypsa_custom_constraint_rhs - ) - - -def test_translate_custom_constraints_lhs(): - ispypsa_custom_constraint_lhs = pd.DataFrame( - { - "variable_name": ["X", "Y", "Z", "W", "F"], - "constraint_id": ["A", "B", "A", "B", "A"], - "term_type": [ - 
"line_flow", - "generator_capacity", - "generator_output", - "load_consumption", - "storage_output", - ], - "coefficient": [1.0, 2.0, 3.0, 4.0, 5.0], - } - ) - expected_pypsa_custom_constraint_lhs = pd.DataFrame( - { - "variable_name": ["X", "Y", "Z", "W", "F"], - "constraint_name": ["A", "B", "A", "B", "A"], - "coefficient": [1.0, 2.0, 3.0, 4.0, 5.0], - "component": ["Line", "Generator", "Generator", "Load", "Storage"], - "attribute": ["s", "p_nom", "p", "p", "p"], - } - ) - pypsa_custom_constraint_lhs = _translate_custom_constraint_lhs( - [ispypsa_custom_constraint_lhs] - ) - pd.testing.assert_frame_equal( - expected_pypsa_custom_constraint_lhs, pypsa_custom_constraint_lhs - ) +def test_translate_custom_constraints_duplicate_constraint_names(csv_str_to_df): + """Test that ValueError is raised when duplicate constraint names exist in RHS.""" + + # Input: manual custom constraints + custom_constraints_lhs_csv = """ + constraint_id, term_type, term_id, coefficient + CONS1, generator_capacity, GEN1, 1.0 + CONS2, generator_capacity, GEN2, 1.0 + """ + + custom_constraints_rhs_csv = """ + constraint_id, summer_typical + CONS1, 5000 + CONS2, 3000 + """ + + # Input: flow path expansion costs (will create CONS1_expansion_limit) + flow_path_expansion_costs_csv = """ + flow_path, additional_network_capacity_mw + CONS1, 500 + """ + + # Input: links + links_csv = """ + isp_name, name, carrier, bus0, bus1, p_nom, p_nom_extendable + CONS1, CONS1_exp_2025, AC, BUS1, BUS2, 0, True + """ + + # Create input data + ispypsa_tables = { + "custom_constraints_lhs": csv_str_to_df(custom_constraints_lhs_csv), + "custom_constraints_rhs": csv_str_to_df(custom_constraints_rhs_csv), + "flow_path_expansion_costs": csv_str_to_df(flow_path_expansion_costs_csv), + } + links = csv_str_to_df(links_csv) + + # Mock configuration + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # This will create two constraints named CONS1: + # - One from manual constraints + # - One from flow path expansion limits (CONS1_expansion_limit) + # These don't conflict because they have different names + # Let's create a real duplicate by having duplicate manual constraints + + # Create duplicate manual constraints + custom_constraints_rhs_with_dup_csv = """ + constraint_id, rhs, constraint_type + CONS1, 5000, <= + CONS1, 6000, <= + CONS2, 3000, <= + """ + + ispypsa_tables_with_dup = { + "custom_constraints_lhs": csv_str_to_df(custom_constraints_lhs_csv), + "custom_constraints_rhs": csv_str_to_df(custom_constraints_rhs_with_dup_csv), + } + + # Should raise ValueError due to duplicate CONS1 in RHS + with pytest.raises( + ValueError, + match="Duplicate constraint names found in custom constraints RHS: \\['CONS1'\\]", + ): + _translate_custom_constraints(config, ispypsa_tables_with_dup, links) + + +def test_translate_custom_constraints_basic_integration(csv_str_to_df): + """Test basic integration of manual constraints and expansion limit constraints.""" + + # Input: manual custom constraints + custom_constraints_lhs_csv = """ + constraint_id, term_type, term_id, coefficient + MANUAL1, generator_capacity, GEN1, 1.0 + MANUAL2, link_flow, PATH1, 2.0 + """ + + custom_constraints_rhs_csv = """ + constraint_id, rhs, constraint_type + MANUAL1, 5000, <= + MANUAL2, 3000, <= + """ + + # Input: flow path expansion costs + flow_path_expansion_costs_csv = """ + flow_path, additional_network_capacity_mw + PATH1, 500 + PATH2, 800 + """ + + # Input: links + links_csv = 
""" + isp_name, name, carrier, bus0, bus1, p_nom, p_nom_extendable + PATH1, PATH1_existing, AC, BUS1, BUS2, 1000, False + PATH1, PATH1_exp_2025, AC, BUS1, BUS2, 0, True + PATH2, PATH2_exp_2025, AC, BUS3, BUS4, 0, True + """ + + # Create input data + ispypsa_tables = { + "custom_constraints_lhs": csv_str_to_df(custom_constraints_lhs_csv), + "custom_constraints_rhs": csv_str_to_df(custom_constraints_rhs_csv), + "flow_path_expansion_costs": csv_str_to_df(flow_path_expansion_costs_csv), + } + links = csv_str_to_df(links_csv) + + # Mock configuration + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # Call the function under test + result = _translate_custom_constraints(config, ispypsa_tables, links) + + # Check that both manual and expansion limit constraints are present + assert "custom_constraints_lhs" in result + assert "custom_constraints_rhs" in result + + # Expected LHS should have manual constraints and expansion limit constraints + lhs = result["custom_constraints_lhs"] + rhs = result["custom_constraints_rhs"] + + # Check constraint names + expected_constraint_names = { + "MANUAL1", + "MANUAL2", + "PATH1_expansion_limit", + "PATH2_expansion_limit", + } + assert set(lhs["constraint_name"].unique()) == expected_constraint_names + assert set(rhs["constraint_name"].unique()) == expected_constraint_names + + # Verify specific constraints + manual1_lhs = lhs[lhs["constraint_name"] == "MANUAL1"] + assert len(manual1_lhs) == 1 + assert manual1_lhs.iloc[0]["variable_name"] == "GEN1" + assert manual1_lhs.iloc[0]["component"] == "Generator" + + # Link flow constraints should be expanded + manual2_lhs = lhs[lhs["constraint_name"] == "MANUAL2"] + assert len(manual2_lhs) == 2 # PATH1_existing and PATH1_exp_2025 + + # Check RHS values + manual1_rhs = rhs[rhs["constraint_name"] == "MANUAL1"] + assert manual1_rhs.iloc[0]["rhs"] == 5000 + + path1_rhs = rhs[rhs["constraint_name"] == "PATH1_expansion_limit"] + assert path1_rhs.iloc[0]["rhs"] == 500 + + +def test_translate_custom_constraints_no_constraints(csv_str_to_df): + """Test when no custom constraints are provided.""" + + # Empty inputs + ispypsa_tables = {} + links = pd.DataFrame() + + # Mock configuration + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # Call the function under test + result = _translate_custom_constraints(config, ispypsa_tables, links) + + # Should return empty dictionary + assert result == {} + + +def test_translate_custom_constraints_mismatched_lhs_rhs(csv_str_to_df): + """Test that ValueError is raised when LHS and RHS constraints don't match.""" + + # Input: manual constraints with mismatched LHS/RHS + custom_constraints_lhs_csv = """ + constraint_id, term_type, term_id, coefficient + CONS1, generator_capacity, GEN1, 1.0 + CONS2, generator_capacity, GEN2, 1.0 + """ + + custom_constraints_rhs_csv = """ + constraint_id, rhs, constraint_type + CONS1, 5000, <= + CONS3, 3000, <= + """ + + # Create input data + ispypsa_tables = { + "custom_constraints_lhs": csv_str_to_df(custom_constraints_lhs_csv), + "custom_constraints_rhs": csv_str_to_df(custom_constraints_rhs_csv), + } + links = pd.DataFrame() + + # Mock configuration + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # Should raise ValueError due to mismatched constraints + with 
pytest.raises( + ValueError, + match="LHS constraints do not have corresponding RHS definitions.*CONS2", + ): + _translate_custom_constraints(config, ispypsa_tables, links) + + +def test_translate_custom_constraints_duplicate_from_different_sources(csv_str_to_df): + """Test duplicate constraint names from different sources.""" + + # Input: manual constraints + custom_constraints_lhs_csv = """ + constraint_id, term_type, term_id, coefficient + PATH1, generator_capacity, GEN1, 1.0 + PATH1, generator_capacity, GEN2, 1.0 + """ + + custom_constraints_rhs_csv = """ + constraint_id, rhs, constraint_type + PATH1, 5000, <= + PATH1, 3000, <= + """ + + # Create input data + ispypsa_tables = { + "custom_constraints_lhs": csv_str_to_df(custom_constraints_lhs_csv), + "custom_constraints_rhs": csv_str_to_df(custom_constraints_rhs_csv), + } + links = pd.DataFrame() + + # Mock configuration + class MockNetworkConfig: + rez_transmission_expansion = False + + class MockConfig: + network = MockNetworkConfig() + + config = MockConfig() + + # This creates duplicate PATH1 constraints in RHS + # Should raise ValueError + with pytest.raises( + ValueError, + match="Duplicate constraint names found in custom constraints RHS: \\['PATH1'\\]", + ): + _translate_custom_constraints(config, ispypsa_tables, links) + + +def test_validate_lhs_rhs_constraints_matching_constraints(csv_str_to_df): + """Test validation passes when LHS and RHS constraints match exactly.""" + + # Create matching LHS constraints + lhs_csv = """ + constraint_name, variable_name, coefficient, component, attribute + CONS1, GEN1, 1.0, Generator, p_nom + CONS1, GEN2, 2.0, Generator, p_nom + CONS2, LINK1, 1.0, Link, p + """ + lhs = csv_str_to_df(lhs_csv) + + # Create matching RHS constraints + rhs_csv = """ + constraint_name, rhs, constraint_type + CONS1, 5000, <= + CONS2, 3000, >= + """ + rhs = csv_str_to_df(rhs_csv) + + # Should not raise any exception + _validate_lhs_rhs_constraints(lhs, rhs) + + +def test_validate_lhs_rhs_constraints_empty_dataframes(csv_str_to_df): + """Test validation passes when both LHS and RHS are empty.""" + + lhs = pd.DataFrame() + rhs = pd.DataFrame() + + # Should not raise any exception + _validate_lhs_rhs_constraints(lhs, rhs) + + +def test_validate_lhs_rhs_constraints_lhs_without_rhs(csv_str_to_df): + """Test validation fails when LHS has constraints without matching RHS.""" + + # Create LHS constraints + lhs_csv = """ + constraint_name, variable_name, coefficient, component, attribute + CONS1, GEN1, 1.0, Generator, p_nom + CONS2, GEN2, 2.0, Generator, p_nom + CONS3, LINK1, 1.0, Link, p + """ + lhs = csv_str_to_df(lhs_csv) + + # Create RHS with missing CONS3 + rhs_csv = """ + constraint_name, rhs, constraint_type + CONS1, 5000, <= + CONS2, 3000, >= + """ + rhs = csv_str_to_df(rhs_csv) + + # Should raise ValueError + with pytest.raises( + ValueError, + match="The following LHS constraints do not have corresponding RHS definitions: \\['CONS3'\\]", + ): + _validate_lhs_rhs_constraints(lhs, rhs) + + +def test_validate_lhs_rhs_constraints_rhs_without_lhs(csv_str_to_df): + """Test validation fails when RHS has constraints without matching LHS.""" + + # Create LHS constraints + lhs_csv = """ + constraint_name, variable_name, coefficient, component, attribute + CONS1, GEN1, 1.0, Generator, p_nom + CONS2, GEN2, 2.0, Generator, p_nom + """ + lhs = csv_str_to_df(lhs_csv) + + # Create RHS with extra CONS3 + rhs_csv = """ + constraint_name, rhs, constraint_type + CONS1, 5000, <= + CONS2, 3000, >= + CONS3, 1000, == + """ + rhs = 
csv_str_to_df(rhs_csv) + + # Should raise ValueError + with pytest.raises( + ValueError, + match="The following RHS constraints do not have corresponding LHS definitions: \\['CONS3'\\]", + ): + _validate_lhs_rhs_constraints(lhs, rhs) + + +def test_validate_lhs_rhs_constraints_multiple_mismatches(csv_str_to_df): + """Test validation with multiple mismatched constraints on both sides.""" + + # Create LHS constraints + lhs_csv = """ + constraint_name, variable_name, coefficient, component, attribute + CONS1, GEN1, 1.0, Generator, p_nom + CONS2, GEN2, 2.0, Generator, p_nom + CONS3, LINK1, 1.0, Link, p + CONS4, LINK2, 2.0, Link, p + """ + lhs = csv_str_to_df(lhs_csv) + + # Create RHS with different constraints + rhs_csv = """ + constraint_name, rhs, constraint_type + CONS1, 5000, <= + CONS2, 3000, >= + CONS5, 2000, == + CONS6, 4000, <= + """ + rhs = csv_str_to_df(rhs_csv) + + # Should raise ValueError about LHS without RHS (checked first) + with pytest.raises( + ValueError, + match="The following LHS constraints do not have corresponding RHS definitions: \\['CONS3', 'CONS4'\\]", + ): + _validate_lhs_rhs_constraints(lhs, rhs) + + +def test_validate_lhs_rhs_constraints_empty_lhs_nonempty_rhs(csv_str_to_df): + """Test validation fails when LHS is empty but RHS has constraints.""" + + lhs = pd.DataFrame() + + # Create non-empty RHS + rhs_csv = """ + constraint_name, rhs, constraint_type + CONS1, 5000, <= + CONS2, 3000, >= + """ + rhs = csv_str_to_df(rhs_csv) + + # Should raise ValueError + with pytest.raises( + ValueError, + match="The following RHS constraints do not have corresponding LHS definitions: \\['CONS1', 'CONS2'\\]", + ): + _validate_lhs_rhs_constraints(lhs, rhs) + + +def test_validate_lhs_rhs_constraints_nonempty_lhs_empty_rhs(csv_str_to_df): + """Test validation fails when LHS has constraints but RHS is empty.""" + + # Create non-empty LHS + lhs_csv = """ + constraint_name, variable_name, coefficient, component, attribute + CONS1, GEN1, 1.0, Generator, p_nom + CONS2, GEN2, 2.0, Generator, p_nom + """ + lhs = csv_str_to_df(lhs_csv) + + rhs = pd.DataFrame() + + # Should raise ValueError + with pytest.raises( + ValueError, + match="The following LHS constraints do not have corresponding RHS definitions: \\['CONS1', 'CONS2'\\]", + ): + _validate_lhs_rhs_constraints(lhs, rhs) + + +def test_validate_lhs_rhs_constraints_duplicate_lhs_entries(csv_str_to_df): + """Test validation with multiple LHS entries for the same constraint (valid case).""" + + # Create LHS with multiple entries for CONS1 + lhs_csv = """ + constraint_name, variable_name, coefficient, component, attribute + CONS1, GEN1, 1.0, Generator, p_nom + CONS1, GEN2, 2.0, Generator, p_nom + CONS1, GEN3, -1.0, Generator, p_nom + CONS2, LINK1, 1.0, Link, p + """ + lhs = csv_str_to_df(lhs_csv) + + # Create matching RHS + rhs_csv = """ + constraint_name, rhs, constraint_type + CONS1, 5000, <= + CONS2, 3000, >= + """ + rhs = csv_str_to_df(rhs_csv) + + # Should not raise any exception - multiple LHS entries per constraint is valid + _validate_lhs_rhs_constraints(lhs, rhs) + + +def test_validate_lhs_rhs_constraints_case_sensitive_names(csv_str_to_df): + """Test that constraint name matching is case-sensitive.""" + + # Create LHS with lowercase names + lhs_csv = """ + constraint_name, variable_name, coefficient, component, attribute + cons1, GEN1, 1.0, Generator, p_nom + CONS2, GEN2, 2.0, Generator, p_nom + """ + lhs = csv_str_to_df(lhs_csv) + + # Create RHS with uppercase names + rhs_csv = """ + constraint_name, rhs, constraint_type + CONS1, 
5000, <= + CONS2, 3000, >= + """ + rhs = csv_str_to_df(rhs_csv) + + # Should raise ValueError due to case mismatch + with pytest.raises( + ValueError, + match="The following LHS constraints do not have corresponding RHS definitions: \\['cons1'\\]", + ): + _validate_lhs_rhs_constraints(lhs, rhs) diff --git a/tests/test_translator/test_unserved_energy_generators.py b/tests/test_translator/test_unserved_energy_generators.py index 9eac200..3db4d67 100644 --- a/tests/test_translator/test_unserved_energy_generators.py +++ b/tests/test_translator/test_unserved_energy_generators.py @@ -2,7 +2,7 @@ import pandas as pd -from ispypsa.config import load_config +from ispypsa.config import ModelConfig, load_config from ispypsa.data_fetch import read_csvs from ispypsa.templater import ( create_ispypsa_inputs_template, @@ -14,23 +14,18 @@ from ispypsa.translator.generators import _create_unserved_energy_generators -def test_unserved_energy_generator_creation(workbook_table_cache_test_path: Path): +def test_unserved_energy_generator_creation( + sample_ispypsa_tables: dict[str, pd.DataFrame], + sample_model_config: ModelConfig, +): """Test that unserved energy generators are created when cost is specified.""" - iasr_tables = read_csvs(workbook_table_cache_test_path) - manual_tables = load_manually_extracted_tables("6.0") - config = load_config(Path(__file__).parent / Path("ispypsa_config.yaml")) - # Set unserved energy cost for testing - config.unserved_energy.cost = 10000.0 - config.unserved_energy.generator_size_mw = 5000.0 - - template_tables = create_ispypsa_inputs_template( - config.scenario, - config.network.nodes.regional_granularity, - iasr_tables, - manual_tables, + sample_model_config.unserved_energy.cost = 10000.0 + sample_model_config.unserved_energy.generator_size_mw = 5000.0 + + pypsa_tables = create_pypsa_friendly_inputs( + sample_model_config, sample_ispypsa_tables ) - pypsa_tables = create_pypsa_friendly_inputs(config, template_tables) # Check for unserved energy generators generators = pypsa_tables["generators"] @@ -50,23 +45,16 @@ def test_unserved_energy_generator_creation(workbook_table_cache_test_path: Path def test_no_unserved_energy_generators_when_cost_is_none( - workbook_table_cache_test_path: Path, + sample_ispypsa_tables: dict[str, pd.DataFrame], + sample_model_config: ModelConfig, ): """Test that no unserved energy generators are created when cost is None.""" - iasr_tables = read_csvs(workbook_table_cache_test_path) - manual_tables = load_manually_extracted_tables("6.0") - config = load_config(Path(__file__).parent / Path("ispypsa_config.yaml")) - # Ensure unserved energy cost is None - config.unserved_energy.cost = None + sample_model_config.unserved_energy.cost = None - template_tables = create_ispypsa_inputs_template( - config.scenario, - config.network.nodes.regional_granularity, - iasr_tables, - manual_tables, + pypsa_tables = create_pypsa_friendly_inputs( + sample_model_config, sample_ispypsa_tables ) - pypsa_tables = create_pypsa_friendly_inputs(config, template_tables) # Check that no unserved energy generators exist generators = pypsa_tables["generators"] diff --git a/tests/test_translator_and_model/test_limit_on_transmission_expansion.py b/tests/test_translator_and_model/test_limit_on_transmission_expansion.py new file mode 100644 index 0000000..bf412f5 --- /dev/null +++ b/tests/test_translator_and_model/test_limit_on_transmission_expansion.py @@ -0,0 +1,229 @@ +from pathlib import Path + +import pandas as pd +from isp_trace_parser.demand_traces import 
write_new_demand_filepath + +from ispypsa.config import ModelConfig +from ispypsa.model.build import build_pypsa_network +from ispypsa.translator.create_pypsa_friendly_inputs import ( + create_pypsa_friendly_inputs, + create_pypsa_friendly_timeseries_inputs, +) + + +def test_flow_path_expansion_limit_respected(csv_str_to_df, tmp_path, monkeypatch): + """Test that link expansion spanning multiple investment periods respects + total limit for flow path. + + This test creates a simple two-region network (A and B) where: + - Region A has an expensive generator and a demand of 100 MW and 200 MW in the two + investment periods. + - Region B has a cheap generator and no demand + - The existing transmission link can only carry 50 MW (half the demand) + - Link expansion costs are cheap in both years + - the total allowable expansion for the flow path is 100 MW + + We would expect that in the first investment period 50 MW of expansion occurs and + in the second investment period another 50 MW of expansion occurs. + """ + # Create directories + ispypsa_dir = tmp_path / "ispypsa_inputs" + ispypsa_dir.mkdir() + pypsa_dir = tmp_path / "pypsa_inputs" + pypsa_dir.mkdir() + traces_dir = tmp_path / "traces" + traces_dir.mkdir() + + # Create subdirectories for traces + for subdir in ["demand", "wind", "solar"]: + (traces_dir / subdir).mkdir() + + # Mock environment variable for trace parser + monkeypatch.setenv("PATH_TO_PARSED_TRACES", str(traces_dir)) + + # Create a mock config + config_dict = { + "ispypsa_run_name": "test", + "scenario": "Step Change", + "wacc": 0.07, + "discount_rate": 0.05, + "network": { + "transmission_expansion": True, + "transmission_expansion_limit_override": None, + "rez_transmission_expansion": False, + "rez_connection_expansion_limit_override": None, + "annuitisation_lifetime": 30, + "nodes": { + "regional_granularity": "sub_regions", + "rezs": "attached_to_parent_node", + }, + "rez_to_sub_region_transmission_default_limit": 1e5, + }, + "temporal": { + "path_to_parsed_traces": "ENV", + "year_type": "fy", + "range": { + "start_year": 2025, + "end_year": 2026, + }, + "capacity_expansion": { + "resolution_min": 30, + "reference_year_cycle": [2018], + "investment_periods": [2025, 2026], + "aggregation": { + "representative_weeks": None, # Use full year + }, + }, + }, + "unserved_energy": { + "cost": 10000.0, + "generator_size_mw": 1000.0, + }, + "solver": "highs", + "iasr_workbook_version": "6.0", + } + + demand_data_to_write = [ + ("2024-08-01 00:00:00", 0.0, "A", "2024-2"), + ("2025-05-01 00:00:00", 100.0, "A", "2025-1"), + ("2025-08-01 00:00:00", 0.0, "A", "2025-2"), + ("2026-05-01 00:00:00", 200.0, "A", "2026-1"), + ("2024-08-01 00:00:00", 0.0, "B", "2024-2"), + ("2025-05-01 00:00:00", 0.0, "B", "2025-1"), + ("2025-08-01 00:00:00", 0.0, "B", "2025-2"), + ("2026-05-01 00:00:00", 0.0, "B", "2026-1"), + ] + + for date_time, demand, subregion, half_year in demand_data_to_write: + demand_data = pd.DataFrame({"Datetime": [date_time], "Value": [demand]}) + demand_data["Datetime"] = pd.to_datetime(demand_data["Datetime"]) + file_meta_data = { + "subregion": subregion, + "scenario": "Step Change", + "reference_year": 2018, + "poe": "POE50", + "demand_type": "OPSO_MODELLING", + "hy": half_year, + } + file_path = Path( + traces_dir / "demand" / write_new_demand_filepath(file_meta_data) + ) + file_path.parent.mkdir(parents=True, exist_ok=True) + demand_data.to_parquet(file_path, index=False) + + # Define ISPyPSA input tables + + # Sub-regions table + sub_regions_csv = """ + 
    isp_sub_region_id, nem_region_id, sub_region_reference_node, sub_region_reference_node_voltage_kv
+    A, A, A Reference Node, 500
+    B, B, B Reference Node, 500
+    """
+    sub_regions = csv_str_to_df(sub_regions_csv)
+
+    # Flow paths table
+    flow_paths_csv = """
+    flow_path, node_from, node_to, carrier, forward_direction_mw_summer_typical, reverse_direction_mw_summer_typical
+    A-B, A, B, AC, 50, 50
+    """
+    flow_paths = csv_str_to_df(flow_paths_csv)
+
+    # Flow path expansion costs table
+    flow_path_expansion_costs_csv = """
+    flow_path, option, additional_network_capacity_mw, 2024_25_$/mw, 2025_26_$/mw
+    A-B, Opt1, 100, 1.0, 1.0
+    """
+    flow_path_expansion_costs = csv_str_to_df(flow_path_expansion_costs_csv)
+
+    # ECAA Generators table (existing generators)
+    # At the moment Brown Coal cost is hard coded to 30 $/MWh and Liquid Fuel to
+    # 400 $/MWh. "__" gets converted to a space.
+    ecaa_generators_csv = """
+    generator, fuel_type, sub_region_id, maximum_capacity_mw
+    expensive_generator_A, Liquid__Fuel, A, 200
+    cheap_generator_B, Brown__Coal, B, 200
+    """
+    ecaa_generators = csv_str_to_df(ecaa_generators_csv)
+
+    # Minimal versions of other required tables
+    new_entrant_generators_csv = """
+    generator, fuel_type, technology_type, sub_region_id
+    """
+    new_entrant_generators = csv_str_to_df(new_entrant_generators_csv)
+
+    # Collect all ISPyPSA tables
+    ispypsa_tables = {
+        "sub_regions": sub_regions,
+        "flow_paths": flow_paths,
+        "flow_path_expansion_costs": flow_path_expansion_costs,
+        "ecaa_generators": ecaa_generators,
+        # Add empty DataFrames for other tables
+        "new_entrant_generators": pd.DataFrame(),
+    }
+
+    # Create a ModelConfig instance
+    config = ModelConfig(**config_dict)
+
+    # Translate ISPyPSA tables to PyPSA-friendly format
+    pypsa_tables = create_pypsa_friendly_inputs(config, ispypsa_tables)
+
+    # Manually create a short, hardcoded snapshots DataFrame so the model works with
+    # our short time series data.
+    snapshots = pd.DataFrame(
+        {
+            "investment_periods": [2025, 2026],
+            "snapshots": ["2025-05-01 00:00:00", "2026-05-01 00:00:00"],
+        }
+    )
+    snapshots["snapshots"] = pd.to_datetime(snapshots["snapshots"])
+
+    # Override the longer snapshots that would otherwise have been auto generated.
+    pypsa_tables["snapshots"] = snapshots
+
+    # Create timeseries data directory structure for PyPSA inputs
+    pypsa_timeseries_dir = pypsa_dir / "timeseries"
+    pypsa_timeseries_dir.mkdir(parents=True)
+
+    # Create demand traces for the network model
+    create_pypsa_friendly_timeseries_inputs(
+        config=config,
+        model_phase="capacity_expansion",
+        ispypsa_tables=ispypsa_tables,
+        snapshots=snapshots,
+        parsed_traces_directory=traces_dir,
+        pypsa_friendly_timeseries_inputs_location=pypsa_timeseries_dir,
+    )
+
+    # Build the network model
+    network = build_pypsa_network(
+        pypsa_friendly_tables=pypsa_tables,
+        path_to_pypsa_friendly_timeseries_data=pypsa_timeseries_dir,
+    )
+
+    # Solve the optimization problem
+    network.optimize.solve_model(
+        solver_name=config.solver,
+    )
+
+    # Check that nominal link capacities are as expected.
+    links = network.links.reset_index().loc[:, ["Link", "p_nom_opt"]]
+    expected_links = """
+    Link, p_nom_opt
+    A-B_existing, 50.0
+    A-B_exp_2025, 50.0
+    A-B_exp_2026, 50.0
+    """
+    expected_links = csv_str_to_df(expected_links)
+    pd.testing.assert_frame_equal(links, expected_links)
+
+    # Check that link dispatch is as expected. In particular, A-B_exp_2026 should only
+    # be dispatched in the second time interval.
+    links_t = network.links_t.p0.reset_index(drop=True)
+    links_t.columns.name = None
+    expected_links_t = """
+    A-B_existing, A-B_exp_2025, A-B_exp_2026
+    -50.0, -50.0, 0.0
+    -50.0, -50.0, -50.0
+    """
+    expected_links_t = csv_str_to_df(expected_links_t)
+    pd.testing.assert_frame_equal(links_t, expected_links_t)
diff --git a/tests/test_translator_and_model/test_time_varying_flow_path_costs.py b/tests/test_translator_and_model/test_time_varying_flow_path_costs.py
new file mode 100644
index 0000000..970a3d5
--- /dev/null
+++ b/tests/test_translator_and_model/test_time_varying_flow_path_costs.py
@@ -0,0 +1,225 @@
+from pathlib import Path
+
+import pandas as pd
+from isp_trace_parser.demand_traces import write_new_demand_filepath
+
+from ispypsa.config import ModelConfig
+from ispypsa.model.build import build_pypsa_network
+from ispypsa.translator.create_pypsa_friendly_inputs import (
+    create_pypsa_friendly_inputs,
+    create_pypsa_friendly_timeseries_inputs,
+)
+
+
+def test_link_expansion_economic_timing(csv_str_to_df, tmp_path, monkeypatch):
+    """Test that link expansion occurs when it becomes economically viable.
+
+    This test creates a simple two-region network (A and B) where:
+    - Region A has an expensive generator and a fixed demand of 250 MW
+    - Region B has a cheap generator and no demand
+    - The existing transmission link can only carry 50 MW (a fifth of the demand)
+    - Link expansion costs change between years, making expansion economic in year 2
+
+    The test uses the translator to convert ISPyPSA format tables to PyPSA format.
+    """
+    # Create directories
+    ispypsa_dir = tmp_path / "ispypsa_inputs"
+    ispypsa_dir.mkdir()
+    pypsa_dir = tmp_path / "pypsa_inputs"
+    pypsa_dir.mkdir()
+    traces_dir = tmp_path / "traces"
+    traces_dir.mkdir()
+
+    # Create subdirectories for traces
+    for subdir in ["demand", "wind", "solar"]:
+        (traces_dir / subdir).mkdir()
+
+    # Mock environment variable for trace parser
+    monkeypatch.setenv("PATH_TO_PARSED_TRACES", str(traces_dir))
+
+    # Create a mock config
+    config_dict = {
+        "ispypsa_run_name": "test",
+        "scenario": "Step Change",
+        "wacc": 0.07,
+        "discount_rate": 0.05,
+        "network": {
+            "transmission_expansion": True,
+            "transmission_expansion_limit_override": None,
+            "rez_transmission_expansion": False,
+            "rez_connection_expansion_limit_override": None,
+            "annuitisation_lifetime": 30,
+            "nodes": {
+                "regional_granularity": "sub_regions",
+                "rezs": "attached_to_parent_node",
+            },
+            "rez_to_sub_region_transmission_default_limit": 1e5,
+        },
+        "temporal": {
+            "path_to_parsed_traces": "ENV",
+            "year_type": "fy",
+            "range": {
+                "start_year": 2025,
+                "end_year": 2026,
+            },
+            "capacity_expansion": {
+                "resolution_min": 30,
+                "reference_year_cycle": [2018],
+                "investment_periods": [2025, 2026],
+                "aggregation": {
+                    "representative_weeks": None,  # Use full year
+                },
+            },
+        },
+        "unserved_energy": {
+            "cost": 10000.0,
+            "generator_size_mw": 1000.0,
+        },
+        "solver": "highs",
+        "iasr_workbook_version": "6.0",
+    }
+
+    demand_data_to_write = [
+        ("2024-08-01 00:00:00", 0.0, "A", "2024-2"),
+        ("2025-05-01 00:00:00", 250.0, "A", "2025-1"),
+        ("2025-08-01 00:00:00", 0.0, "A", "2025-2"),
+        ("2026-05-01 00:00:00", 250.0, "A", "2026-1"),
+        ("2024-08-01 00:00:00", 0.0, "B", "2024-2"),
+        ("2025-05-01 00:00:00", 0.0, "B", "2025-1"),
+        ("2025-08-01 00:00:00", 0.0, "B", "2025-2"),
+        ("2026-05-01 00:00:00", 0.0, "B", "2026-1"),
+    ]
+
+    for date_time, demand, subregion, half_year in demand_data_to_write:
+        demand_data = pd.DataFrame({"Datetime": [date_time], "Value": [demand]})
+
        demand_data["Datetime"] = pd.to_datetime(demand_data["Datetime"])
+        file_meta_data = {
+            "subregion": subregion,
+            "scenario": "Step Change",
+            "reference_year": 2018,
+            "poe": "POE50",
+            "demand_type": "OPSO_MODELLING",
+            "hy": half_year,
+        }
+        file_path = Path(
+            traces_dir / "demand" / write_new_demand_filepath(file_meta_data)
+        )
+        file_path.parent.mkdir(parents=True, exist_ok=True)
+        demand_data.to_parquet(file_path, index=False)
+
+    # Define ISPyPSA input tables
+
+    # Sub-regions table
+    sub_regions_csv = """
+    isp_sub_region_id, nem_region_id, sub_region_reference_node, sub_region_reference_node_voltage_kv
+    A, A, A Reference Node, 500
+    B, B, B Reference Node, 500
+    """
+    sub_regions = csv_str_to_df(sub_regions_csv)
+
+    # Flow paths table
+    flow_paths_csv = """
+    flow_path, node_from, node_to, carrier, forward_direction_mw_summer_typical, reverse_direction_mw_summer_typical
+    A-B, A, B, AC, 50, 50
+    """
+    flow_paths = csv_str_to_df(flow_paths_csv)
+
+    # Flow path expansion costs table
+    flow_path_expansion_costs_csv = """
+    flow_path, option, additional_network_capacity_mw, 2024_25_$/mw, 2025_26_$/mw
+    A-B, Opt1, 150, 1000000, 0.0
+    """
+    flow_path_expansion_costs = csv_str_to_df(flow_path_expansion_costs_csv)
+
+    # ECAA Generators table (existing generators)
+    # At the moment Brown Coal cost is hard coded to 30 $/MWh and Liquid Fuel to
+    # 400 $/MWh. "__" gets converted to a space.
+    ecaa_generators_csv = """
+    generator, fuel_type, sub_region_id, maximum_capacity_mw
+    expensive_generator_A, Liquid__Fuel, A, 200
+    cheap_generator_B, Brown__Coal, B, 200
+    """
+    ecaa_generators = csv_str_to_df(ecaa_generators_csv)
+
+    # Minimal versions of other required tables
+    new_entrant_generators_csv = """
+    generator, fuel_type, technology_type, sub_region_id
+    """
+    new_entrant_generators = csv_str_to_df(new_entrant_generators_csv)
+
+    # Collect all ISPyPSA tables
+    ispypsa_tables = {
+        "sub_regions": sub_regions,
+        "flow_paths": flow_paths,
+        "flow_path_expansion_costs": flow_path_expansion_costs,
+        "ecaa_generators": ecaa_generators,
+        # Add empty DataFrames for other tables
+        "new_entrant_generators": pd.DataFrame(),
+    }
+
+    # Create a ModelConfig instance
+    config = ModelConfig(**config_dict)
+
+    # Translate ISPyPSA tables to PyPSA-friendly format
+    pypsa_tables = create_pypsa_friendly_inputs(config, ispypsa_tables)
+
+    # Manually create a short, hardcoded snapshots DataFrame so the model works with
+    # our short time series data.
+    snapshots = pd.DataFrame(
+        {
+            "investment_periods": [2025, 2026],
+            "snapshots": ["2025-05-01 00:00:00", "2026-05-01 00:00:00"],
+        }
+    )
+    snapshots["snapshots"] = pd.to_datetime(snapshots["snapshots"])
+
+    # Override the longer snapshots that would otherwise have been auto generated.
+ pypsa_tables["snapshots"] = snapshots + + # Create timeseries data directory structure for PyPSA inputs + pypsa_timeseries_dir = pypsa_dir / "timeseries" + pypsa_timeseries_dir.mkdir(parents=True) + + # Create demand traces for the network model + create_pypsa_friendly_timeseries_inputs( + config=config, + model_phase="capacity_expansion", + ispypsa_tables=ispypsa_tables, + snapshots=snapshots, + parsed_traces_directory=traces_dir, + pypsa_friendly_timeseries_inputs_location=pypsa_timeseries_dir, + ) + + # Build the network model + network = build_pypsa_network( + pypsa_friendly_tables=pypsa_tables, + path_to_pypsa_friendly_timeseries_data=pypsa_timeseries_dir, + ) + + # Solve the optimization problem + network.optimize.solve_model( + solver_name=config.solver, + ) + + # Check that nominal link capacities are as expected. + links = network.links.reset_index().loc[:, ["Link", "p_nom_opt"]] + expected_links = """ + Link, p_nom_opt + A-B_existing, 50.0 + A-B_exp_2025, 0.0 + A-B_exp_2026, 150.0 + """ + expected_links = csv_str_to_df(expected_links) + pd.testing.assert_frame_equal(links, expected_links) + + # Check that link dispatch is as expected. In particular that A-B_exp_2026 is only + # dispatch in the second time interval. + links_t = network.links_t.p0.reset_index(drop=True) + links_t.columns.name = None + expected_links_t = """ + A-B_existing, A-B_exp_2025, A-B_exp_2026 + -50.0, 0.0, 0.0 + -50.0, 0.0, -150.0 + """ + expected_links_t = csv_str_to_df(expected_links_t) + pd.testing.assert_frame_equal(links_t, expected_links_t) diff --git a/tests/test_workbook_table_cache/flow_path_augmentation_costs_step_change_and_green_energy_exports_CNSW-NNSW.csv b/tests/test_workbook_table_cache/flow_path_augmentation_costs_step_change_and_green_energy_exports_CNSW-NNSW.csv new file mode 100644 index 0000000..3ecdc14 --- /dev/null +++ b/tests/test_workbook_table_cache/flow_path_augmentation_costs_step_change_and_green_energy_exports_CNSW-NNSW.csv @@ -0,0 +1,6 @@ +Flow path,Option Name,2021-22,2022-23,2023-24,2024-25,2025-26,2026-27,2027-28,2028-29,2029-30,2030-31,2031-32,2032-33,2033-34,2034-35,2035-36,2036-37,2037-38,2038-39,2039-40,2040-41,2041-42,2042-43,2043-44,2044-45,2045-46,2046-47,2047-48,2048-49,2049-50,2050-51,2051-52,2052-53,2053-54 +CNSW-NNSW,CNSW-NNSW Option 1,1834410577.13,1839984585.4913588,1870490675.9237769,1901016121.5390062,1920883577.9873168,1930908055.5877812,1943401003.800204,1955223742.3465283,1966540533.8132858,1977435998.7638278,1979577429.9072478,1981758153.365226,1983978169.1377623,1986237477.2248569,1988526254.5478702,1990864147.2640808,1993241332.29485,1995657809.6401772,1998113579.3000627,2000539879.7240293,2002976003.2266355,2005412126.7292418,2007838427.1532087,2010274550.655815,2012710674.1584213,2015146797.6610277,2017582921.1636338,2020019044.6662402,2022455168.1688464,2024891291.6714528,2027327415.174059,2029763538.6766653,2032199662.1792715 +CNSW-NNSW,CNSW-NNSW Option 
2,1492549482.75,1498368669.1638362,1522299684.6067889,1547240613.4523966,1564450054.2369545,1573439757.1331573,1584194116.2320602,1594242888.5867147,1603759294.408156,1614609348.358562,1616684033.8505893,1618796786.9663234,1620947607.7057645,1623136496.0689127,1625353935.1498408,1627618958.7604027,1629922049.9946716,1632263208.852647,1634642435.3343298,1636993111.0982323,1639353303.7680616,1641713496.4378908,1644064172.2017932,1646424364.8716223,1648784557.5414515,1651144750.2112808,1653504942.88111,1655865135.550939,1658225328.2207682,1660585520.8905976,1662945713.5604267,1665305906.230256,1667666098.9000852 +CNSW-NNSW,CNSW-NNSW Option 3,2451577025.7,2463020830.0122647,2502997770.0901384,2545275893.483528,2575674869.1368957,2591488625.803442,2609967764.215165,2627209212.818076,2643522207.6386623,2666347675.2928495,2670535825.244838,2674800821.9849367,2679142665.513145,2683561355.8294635,2688037681.236864,2692610065.129402,2697259295.8100495,2701985373.2788076,2706788297.5356755,2711533586.7014604,2716298087.5642734,2721062588.427086,2725807877.592871,2730572378.455684,2735336879.3184967,2740101380.181309,2744865881.044122,2749630381.9069347,2754394882.7697477,2759159383.6325603,2763923884.495373,2768688385.3581862,2773452886.2209983 +CNSW-NNSW,CNSW-NNSW Option 4,2544205834.44,2570694957.328485,2622183967.2558355,2666780915.346864,2694522775.7770452,2707824210.660078,2721435908.9473414,2735654907.380341,2750909197.5317435,2762078944.579713,2764401889.2216697,2766767456.701094,2769175647.017986,2771626460.1723437,2774109240.454803,2776645299.284096,2779223980.9508553,2781845285.4550824,2784509212.7967763,2787141173.0103703,2789783788.9333305,2792426404.8562913,2795058365.069885,2797700980.9928455,2800343596.915806,2802986212.8387666,2805628828.7617273,2808271444.684687,2810914060.607648,2813556676.530608,2816199292.453569,2818841908.376529,2821484524.29949 +CNSW-NNSW,CNSW-NNSW Option 5,2795731374.64,2824605711.510762,2880109217.913806,2929211740.0852356,2960356667.7182274,2975470163.7745647,2990930764.300607,3006892333.8963594,3023860923.289551,3038206140.820386,3040979041.9005356,3043802821.899586,3046677480.8175387,3049603018.654393,3052566715.6804237,3055594011.355082,3058672185.948642,3061801239.4611034,3064981171.892467,3068122945.1346545,3071277438.106567,3074431931.07848,3077573704.3206663,3080728197.292579,3083882690.2644916,3087037183.2364044,3090191676.208317,3093346169.1802297,3096500662.152142,3099655155.124055,3102809648.0959673,3105964141.06788,3109118634.0397925 diff --git a/tests/test_workbook_table_cache/flow_path_augmentation_options_CNSW-NNSW.csv b/tests/test_workbook_table_cache/flow_path_augmentation_options_CNSW-NNSW.csv new file mode 100644 index 0000000..5e16304 --- /dev/null +++ b/tests/test_workbook_table_cache/flow_path_augmentation_options_CNSW-NNSW.csv @@ -0,0 +1,6 @@ +Flow path,Development path,Development Driver,Option Name,Augmentation Description,Forward direction power flow,"Notional transfer level increase (MW) Note: Same increase applies to all transfer limit conditions (Peak demand, Summer typical and Winter reference)_Forward direction","Notional transfer level increase (MW) Note: Same increase applies to all transfer limit conditions (Peak demand, Summer typical and Winter reference)_Reverse direction","Indicative cost estimate ($2023, $ million) - Note 2",Cost estimate source,Cost estimate class,Easement length in km,Lead time or Earliest in Service Date & 4,Additional REZ hosting capacity provided +CNSW-NNSW,Near the existing CNSW-NNSW 
corridor,"Increase thermal capacity and, voltage and transient stability limits of 330 kV lines between Liddell and Armidale. Provide access to Renewable generation in N1 and N2 REZ.",CNSW-NNSW Option 1,• New Central (Hub 5) 500/330 kV substation with 3 x 500/330/33 kV 1500 MVA transformers cut into existing line between Tamworth and Armidale. • New 500 kV DCST line between Central (Hub 5) and Bayswater with Quad Orange conductor. • 4 x 500 kV 150 MVAr line shunt reactors (in total) are required for 500kV DCST line between Central Hub 5 and Bayswater. • New 4 x 330 kV 340 MVA phase shifting transformers at Central (Hub 5).,CNSW to NNSW,3000,3000,1834,AEMO (TCD),Class 5b,225,Sep-28 (See note 10),N2: 2000 +CNSW-NNSW,Near the existing CNSW-NNSW corridor,"Increase thermal capacity and, voltage and transient stability limits of 330 kV lines between Liddell and Armidale. Provide access to Renewable generation in N1 and N2 REZ.",CNSW-NNSW Option 2,• Expand Northern (Hub 10) switching station to 500/330kV substation with 3 x 500/330/33kV 1500 MVA transformers and cut into the existing 330 kV lines between Armidale to Sapphire/Dumaresq • Expand Central South (Hub 1) switching station to 500/330 kV substation with 3 x 500/330/33 kV 1500 MVA transformers. • New 500 kV DCST from Central South (Hub 1) to Bayswater with Quad Orange conductor • Operate line between Central Hub 5 and Central South Hub 1 from 330 kV to 500 kV. • Operate line between Central Hub 5 and Northern Hub 10 from 330 kV to 500 kV. • 4 x 500 kV 150 MVAr line shunt reactors (in total) are required for 500 kV double-circuit line between Central South (Hub 1) and Bayswater.,• Expand Northern (Hub 10) switching station to 500/330kV substation with 3 x 500/330/33kV 1500 MVA transformers and cut into the existing 330 kV lines between Armidale to Sapphire/Dumaresq • Expand Central South (Hub 1) switching station to 500/330 kV substation with 3 x 500/330/33 kV 1500 MVA transformers. • New 500 kV DCST from Central South (Hub 1) to Bayswater with Quad Orange conductor • Operate line between Central Hub 5 and Central South Hub 1 from 330 kV to 500 kV. • Operate line between Central Hub 5 and Northern Hub 10 from 330 kV to 500 kV. • 4 x 500 kV 150 MVAr line shunt reactors (in total) are required for 500 kV double-circuit line between Central South (Hub 1) and Bayswater.,3000,3000,1493,AEMO (TCD),Class 5b,217,Long,N2: 3000 +CNSW-NNSW,Near the existing CNSW-NNSW corridor,"Increase thermal capacity and, voltage and transient stability limits of 330 kV lines between Liddell and Armidale. Provide access to Renewable generation in N1 and N2 REZ.",CNSW-NNSW Option 3,"• New Central South (Hub 1) 500/330 kV substation in New England with 3 x 500/330/33 kV 1500 MVA transformers. • New 330 kV Central (Hub5) switching station in New England and cut into the existing lines between Tamworth and Armidale. • New 500 kV built and initially 330 kV operated double-circuit line from Hub 5 to Hub 1 • New 500 kV double-circuit line between Hub 1 and Bayswater with Quad Orange conductor. • 4 x 500 kV 150 MVAr line shunt reactors (in total) are required for 500 kV double-circuit line between Hub 1 and Bayswater. • Rebuild portion of Line 86 from Hub 5 to Tamworth as 330 kV double-circuit line. • Rebuild Line 88 Tamworth - Muswellbrook and Line 83 Liddell - Muswellbrook as 330 kV double-circuit line. 
• Augment Hub 5, Tamworth, Muswellbrook and Liddell to accommodate additional lines.","• New Central South (Hub 1) 500/330 kV substation in New England with 3 x 500/330/33 kV 1500 MVA transformers. • New 330 kV Central (Hub5) switching station in New England and cut into the existing lines between Tamworth and Armidale. • New 500 kV built and initially 330 kV operated double-circuit line from Hub 5 to Hub 1 • New 500 kV double-circuit line between Hub 1 and Bayswater with Quad Orange conductor. • 4 x 500 kV 150 MVAr line shunt reactors (in total) are required for 500 kV double-circuit line between Hub 1 and Bayswater. • Rebuild portion of Line 86 from Hub 5 to Tamworth as 330 kV double-circuit line. • Rebuild Line 88 Tamworth - Muswellbrook and Line 83 Liddell - Muswellbrook as 330 kV double-circuit line. • Augment Hub 5, Tamworth, Muswellbrook and Liddell to accommodate additional lines.",3600,3600,2452,AEMO (TCD),Class 5b,225,Long,N1+N2: 3600 +CNSW-NNSW,Near the existing CNSW-NNSW corridor,"Increase thermal capacity and, voltage and transient stability limits of 330 kV lines between Liddell and Armidale. Provide access to Renewable generation in N1 and N2 REZ.",CNSW-NNSW Option 4,• 2000 MW bi-pole HVDC transmission system between locality Bayswater and locality of Hub 5. • A new 330 kV double-circuit line from a new substation in locality of Hub 5 to Armidale. • Reconnect both Tamworth-Armidale 330 kV lines from Armidale to a new substation in locality of Hub 5.,• 2000 MW bi-pole HVDC transmission system between locality Bayswater and locality of Hub 5. • A new 330 kV double-circuit line from a new substation in locality of Hub 5 to Armidale. • Reconnect both Tamworth-Armidale 330 kV lines from Armidale to a new substation in locality of Hub 5.,1750,2000,2544,AEMO (TCD),Class 5b,280,Long,N2: 2000 +CNSW-NNSW,West of the existing CNSW-NNSW corridor,West of the existing CNSW-NNSW corridor,CNSW-NNSW Option 5,• A 2000 MW bi-pole HVDC transmission system between locality of Wollar and locality of Boggabri. • A new 330 kV AC line between locality of Boggabri and Tamworth.,• A 2000 MW bi-pole HVDC transmission system between locality of Wollar and locality of Boggabri. 
• A new 330 kV AC line between locality of Boggabri and Tamworth.,1750,2000,2796,AEMO (TCD),Class 5b,350,Long,N1: 2000 diff --git a/tests/test_workbook_table_cache/rez_augmentation_costs_step_change_and_green_energy_exports_NSW.csv b/tests/test_workbook_table_cache/rez_augmentation_costs_step_change_and_green_energy_exports_NSW.csv new file mode 100644 index 0000000..c5a319f --- /dev/null +++ b/tests/test_workbook_table_cache/rez_augmentation_costs_step_change_and_green_energy_exports_NSW.csv @@ -0,0 +1,30 @@ +REZ / Constraint ID,REZ Name,Option,2021-22,2022-23,2023-24,2024-25,2025-26,2026-27,2027-28,2028-29,2029-30,2030-31,2031-32,2032-33,2033-34,2034-35,2035-36,2036-37,2037-38,2038-39,2039-40,2040-41,2041-42,2042-43,2043-44,2044-45,2045-46,2046-47,2047-48,2048-49,2049-50,2050-51,2051-52,2052-53,2053-54 +N1,North West NSW,Option 1,4683570694.16,4703516536.587628,4775599569.90908,4855161748.758409,4914331441.04575,4944752228.921412,4980211168.8201885,5013161859.253148,5044274310.512051,5086983852.377509,5095732142.107587,5104640950.91528,5113710278.80059,5122940125.763515,5132290362.034653,5141841247.152812,5151552651.348585,5161424574.621976,5171457016.972982,5181369070.015777,5191321252.827974,5201273435.640174,5211185488.682967,5221137671.495166,5231089854.307364,5241042037.119563,5250994219.931761,5260946402.743958,5270898585.556158,5280850768.368356,5290802951.180554,5300755133.992752,5310707316.80495 +N2,New England,Option 1,370392725.98,372722679.4854966,379824215.2963512,386767380.2319919,391487462.1087017,393870708.99457115,396672750.1753738,399355739.3182176,401941997.47199464,405273274.02934086,405901299.65604,406540848.6887337,407191921.12742186,407854516.9721045,408525755.371283,409211398.02795464,409908564.09062064,410617253.5592813,411337466.4339363,412049036.7540955,412763487.9257533,413477939.0974111,414189509.4175703,414903960.5892281,415618411.76088583,416332862.93254364,417047314.10420144,417761765.2758593,418476216.44751704,419190667.61917484,419905118.79083264,420619569.96249044,421334021.13414824 +N2,New England,Option 2,1004401782,1011204959.0608695,1030510596.3865469,1051698711.1524805,1067967373.7033664,1075384724.442036,1083891439.372567,1080930713.9663014,1088835747.5478656,1098338157.6225178,1101521443.5996315,1104763138.4937568,1108063242.3048933,1111421755.0330405,1114824074.4489467,1118299405.0111167,1121833144.4902983,1125425292.8864906,1129075850.1996946,1132682600.82514,1136303953.6798382,1139925306.5345366,1143532057.159982,1147153410.0146804,1150774762.8693786,1154396115.7240767,1158017468.578775,1161638821.4334733,1165260174.2881715,1168881527.1428697,1172502879.997568,1176124232.852266,1179745585.7069643 +N2,New England,Option 3,647203680.09,651990774.0911866,664965036.4823755,679100470.3606932,690132640.2294028,695099176.6696088,700758267.7185646,706235776.1617585,711574725.4968332,719240649.0630598,721505406.3417673,723811718.7999008,726159586.4374601,728549009.2544451,730969598.4559995,733442131.6318363,735956219.9870988,738511863.5217872,741109062.2359015,743675094.5654461,746251515.6898476,748827936.8142488,751393969.1437936,753970390.2681949,756546811.3925962,759123232.5169975,761699653.6413989,764276074.7658002,766852495.8902014,769428917.0146028,772005338.1390041,774581759.2634053,777158180.3878065 +N3,Central-West Orana,Central West Orana REZ transmission link,Anticipated project,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +N3,Central-West Orana,Option 
1,243410039.19,242986106.28472227,247406498.6209051,251298062.38263893,253040851.28678718,253610745.6513225,254692821.9521056,255832786.34974557,257031912.34130797,257043196.70366573,257046109.9703669,257049076.69150293,257052096.86707386,257055170.49707967,257058284.21791163,257061464.75678724,257064698.7500977,257067986.19784302,257071327.10002327,257074627.91137734,257077942.08634013,257081256.26130292,257084557.07265696,257087871.24761972,257091185.4225825,257094499.5975453,257097813.77250808,257101127.94747087,257104442.12243366,257107756.29739648,257111070.47235924,257114384.64732203,257117698.82228482 +N3,Central-West Orana,Option 2,329782042.82,331112953.9979033,336959977.87500226,342841615.74305683,346859456.10402524,348774840.16777277,351114398.03431505,353358435.9742006,355534331.4406314,358388504.1917473,358936636.9318562,359494827.153435,360063074.85648364,360641380.04100215,361227228.33662313,361825648.4840815,362434126.11300975,363052661.2234078,363681253.81527585,364302303.29604137,364925867.1471744,365549430.99830747,366170480.4790731,366794044.3302061,367417608.18133914,368041172.0324722,368664735.88360524,369288299.7347383,369911863.58587134,370535427.43700445,371158991.28813744,371782555.1392705,372406118.99040353 +N3,Central-West Orana,Option 3,272858295.37,274691352.47226685,280108848.34253716,285335945.4057081,288803657.66047937,290527255.91549844,292568214.39660907,294544174.2947216,296464843.77767247,299050274.0024961,299506845.8003534,299971795.0623915,300445121.7886105,300926825.9790104,301414813.26754594,301913272.3863076,302420108.96925,302935323.01637334,303458914.52767766,303976222.94084615,304495625.72006,305015028.4992737,305532336.9124423,306051739.6916561,306571142.4708699,307090545.2500836,307609948.0292975,308129350.8085113,308648753.58772504,309168156.3669389,309687559.1461526,310206961.92536634,310726364.7045802 +N4,Broken Hill,Option 1,5097859988.37,5117220290.986921,5184097635.63244,5254876296.384607,5302026055.489117,5331501877.137287,5365979582.17867,5397057624.394069,5425595998.865156,5457995363.637974,5460280959.514247,5462608492.929535,5464977963.883836,5467389372.377151,5469832234.024727,5472327517.59607,5474864738.706429,5477443897.355801,5480064993.544187,5482654636.578313,5485254763.997192,5487854891.416071,5490444534.450197,5493044661.869077,5495644789.287955,5498244916.706835,5500845044.125714,5503445171.544593,5506045298.963472,5508645426.382351,5511245553.80123,5513845681.22011,5516445808.6389885 +N4,Broken Hill,Option 2,4575560873.27,4636165564.515622,4728471231.646421,4819838797.818658,4879521590.230203,4908737325.32523,4936833673.433418,4965601447.962348,4996493508.43048,5031463204.46298,5037537249.226662,5043722744.352981,5050019689.841935,5056428085.693526,5062920069.317095,5069551365.893958,5076294112.833458,5083148310.135594,5090113957.800368,5096996017.693163,5103905940.176618,5110815862.660072,5117697922.552868,5124607845.036323,5131517767.519777,5138427690.003231,5145337612.486687,5152247534.970141,5159157457.453595,5166067379.937051,5172977302.420505,5179887224.903959,5186797147.387414 +N5,South West NSW,Option 
1,1418427595.46,1425333132.6531534,1446268799.9946914,1469948259.1294363,1487141526.8051271,1496070832.9790375,1506444526.7124524,1516145948.2089193,1525369903.3027992,1544376717.5472448,1546736519.9897628,1549139621.5596664,1551586022.2569559,1554075722.081631,1556597896.2518454,1559174194.3312917,1561793791.5381236,1564456687.8723414,1567162883.3339446,1569836604.4500086,1572521150.3479192,1575205696.2458296,1577879417.361894,1580563963.2598045,1583248509.1577146,1585933055.0556252,1588617600.9535356,1591302146.8514462,1593986692.7493567,1596671238.647267,1599355784.5451777,1602040330.4430883,1604724876.3409986 +N5,South West NSW,Option 2,382536056.11,385148834.6004999,391826524.703996,399135083.80676216,404725671.9347864,407486170.8777715,410596647.88537735,413541485.48571044,416364333.7749013,425232036.682354,426186057.5377131,427157583.3628954,428146614.15790075,429153149.92272925,430172814.414925,431214360.1193997,432273410.79369754,433349966.43781835,434444027.0517624,435524958.93833905,436610267.0673715,437695575.1964039,438776507.0829806,439861815.212013,440947123.3410455,442032431.4700779,443117739.5991103,444203047.72814274,445288355.8571752,446373663.9862077,447458972.11524004,448544280.24427253,449629588.3733049 +N5,South West NSW,Option 3,299577968.52,301430153.3962949,306774951.64868736,312397961.74899757,316595939.3309462,318684689.40297085,321078470.7037487,323342248.00547856,325504702.7839533,331709784.81673473,332390376.5567333,333083456.2185667,333789023.80223495,334507079.307738,335234500.7546172,335977532.10379,336733051.3747975,337501058.56763995,338281553.6823172,339052682.8556183,339826934.00937814,340601185.163138,341372314.33643913,342146565.4901989,342920816.64395875,343695067.7977186,344469318.9514784,345243570.1052382,346017821.25899804,346792072.4127579,347566323.5665177,348340574.72027755,349114825.8740373 +N5,South West NSW,Option 4,416523543.37,419592161.89297605,426784213.9668964,435030044.94867826,441723036.10041445,444955476.0375468,448499979.494254,451845052.90427196,455059361.3117193,467316675.2099498,468582992.8303935,469872545.63653326,471185333.62836933,472521356.80590165,473874806.37270606,475257299.9216307,476663028.6562516,478091992.5765687,479544191.68258214,480978964.3993233,482419545.9124885,483860127.4256537,485294900.14239496,486735481.6555602,488176063.1687254,489616644.6818906,491057226.19505584,492497807.70822114,493938389.2213863,495378970.73455155,496819552.2477168,498260133.76088196,499700715.27404714 +N5,South West NSW,Option 5,1046609662.37,1052952433.550638,1071323633.6956228,1091127653.585171,1093303952.6365564,1100869971.5927286,1109393448.461902,1117383500.573282,1124988856.6268702,1146342846.3103762,1149004694.6910162,1151715384.326347,1154474915.2163687,1157283287.361081,1160128290.446811,1163034345.1009045,1165989241.0096884,1168992978.1731632,1172045556.5913289,1175061504.068476,1178089661.859296,1181117819.650116,1184133767.1272635,1187161924.9180837,1190190082.7089036,1193218240.4997237,1196246398.2905438,1199274556.081364,1202302713.8721838,1205330871.663004,1208359029.4538238,1211387187.2446442,1214415345.0354638 +SWNSW1,Secondary Transmission Limit - South West NSW,Option 
1,167000000,168033762.58,170734660.18,174001675.03,176856490.5,178229713.55,179731575.18,181125039.64,182441642.04,183022788.08,183614684.67,184217441.76,184831059.33,185455537.4,186088160.82,186734359.86,187391419.39,188059339.4,188738119.9,189408755.04,190082105.3,190755455.56,191426090.69,192099440.95,192772791.21,193446141.47,194119491.73,194792841.98,195466192.24,196139542.5,196812892.76,197486243.02,198159593.27 +N6,Wagga Wagga,Option 1,Refer to Forecasted Transmission flow paths (SNSW-CNSW) Option 3 and 4 in the flow paths,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +N7,Tumut,Option 1,HumeLink,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +N8,Cooma-Monaro,Option 1,201643435.92,203332481.09947747,206890562.86120406,211157393.32182783,214743970.03302857,216428450.63267922,218250636.54295516,219979845.51475963,221650742.66753212,225275368.0381893,226018272.1716338,226774807.5735819,227544974.24403355,228328772.18298873,229122793.5733216,229933854.04928395,230758545.7937499,231596868.80671936,232448823.08819243,233290553.9182878,234135692.56550908,234980831.21273035,235822562.0428257,236667700.69004697,237512839.33726823,238357977.98448953,239203116.63171083,240048255.2789321,240893393.92615336,241738532.57337463,242583671.2205959,243428809.86781716,244273948.5150384 +N8,Cooma-Monaro,Option 2,512954346.52,515746313.32277024,525094845.75862,535570829.7798261,544188611.3109237,548149756.2427804,552642984.5468522,545497432.6261146,549485986.34145,556155550.2711177,557911201.5018915,559699066.516716,561519145.3155916,563371437.898518,565247890.8194824,567164610.9705107,569113544.9055897,571094692.6247197,573108054.1279006,575097255.2930433,577094509.9041986,579091764.515354,581080965.6804968,583078220.2916522,585075474.9028076,587072729.513963,589069984.1251185,591067238.7362739,593064493.3474293,595061747.9585848,597059002.5697402,599056257.1808954,601053511.7920508 +N9,Hunter-Central Coast,Option 1,307063934.91,308253314.1289946,314822944.1075105,320875605.71978575,325062966.38444805,327057438.6371748,329488625.3114743,331801352.67970717,334010986.64000005,337298442.1695044,337930835.20317924,338574831.7787565,339230431.89623594,339897635.55561775,340573541.8714262,341263952.6146126,341965966.89970124,342679584.72669214,343404806.09558535,344121324.8080519,344840744.405994,345560164.00393605,346276682.7164026,346996102.31434464,347715521.91228676,348434941.5102289,349154361.10817087,349873780.706113,350593200.3040551,351312619.9019972,352032039.49993926,352751459.0978813,353470878.6958234 +N9,Hunter-Central Coast,Option 1A,283129029.82,284209281.02826434,290165004.8449576,295750949.6597992,299736762.10025394,301631460.1057646,303919442.1178005,306085986.5372956,308148964.04899776,311482912.3828618,312120640.25105953,312770069.54803157,313431200.27377784,314104032.4282984,314785640.6543997,315481875.6664688,316189812.10731214,316909449.9769298,317640789.2753217,318363352.5021329,319088841.08613765,319814329.6701424,320536892.8969537,321262381.4809584,321987870.0649632,322713358.64896804,323438847.23297274,324164335.81697756,324889824.4009823,325615312.98498714,326340801.56899184,327066290.15299666,327791778.7370014 +N9,Hunter-Central Coast,Option 
1AB,274506774.68,275601709.74302685,281315034.7185488,286692285.03282773,290389108.8418773,292116926.81184053,294229969.8447924,296255229.2790906,298207740.2736086,301068367.8869726,301626733.2060974,302195343.7604355,302774199.5499869,303363300.5747517,303960085.5259263,304569677.02111757,305189513.75152224,305819595.71714014,306459922.9179713,307092566.1923926,307727770.7756171,308362975.35884166,308995618.6332629,309630823.21648747,310266027.79971206,310901232.38293654,311536436.96616113,312171641.5493857,312806846.1326102,313442050.7158348,314077255.29905933,314712459.8822839,315347664.4655084 +N9,Hunter-Central Coast,Option 1B,298259423,299459028.0639648,305779139.5944096,311618137.04806894,315516202.714833,317343887.4857389,319600475.1325514,321771800.0293708,323870247.4076546,326696260.5142928,327250320.8393112,327814547.40882534,328388940.22283524,328973499.28134096,329565683.0232184,330170574.57071555,330785632.3627085,331410856.3991972,332046246.68018156,332674012.2777942,333304319.4365307,333934626.5952673,334562392.19287986,335192699.35161644,335823006.51035297,336453313.6690895,337083620.8278261,337713927.9865626,338344235.14529914,338974542.3040357,339604849.4627722,340235156.6215088,340865463.7802453 +N9,Hunter-Central Coast,Option 2,58512971.63,59264516.47234721,60463128.646335594,61586003.06832661,62132057.98194883,62424149.08457694,62768378.68158464,63134260.82154819,63517191.71301671,63564126.00740309,63573163.051127784,63582365.912168525,63591734.590525314,63601269.08619815,63610927.94485802,63620794.07516296,63630826.02278395,63641023.78772098,63651387.36997407,63661626.58924012,63671907.26283518,63682187.93643024,63692427.15569628,63702707.82929134,63712988.5028864,63723269.17648146,63733549.85007653,63743830.52367159,63754111.197266646,63764391.87086171,63774672.54445677,63784953.21805183,63795233.89164689 +N9,Hunter-Central Coast,Option 2A,106104649.48,106707980.56588137,108923877.36792059,110857310.84005766,111741595.0292219,112127710.41144843,112683338.61457375,113276559.8963809,110568873.62265828,110596377.28538355,110605383.60410805,110614555.17638712,110623892.00222076,110633394.081609,110643020.10116316,110652852.68766055,110662850.52771251,110673013.62131906,110683341.96848017,110693546.37547536,110703792.0958592,110714037.81624302,110724242.22323821,110734487.94362204,110744733.66400586,110754979.3843897,110765225.10477354,110775470.82515736,110785716.54554118,110795962.26592502,110806207.98630886,110816453.70669268,110826699.4270765 +N10,Hunter Coast,Option 1,Refer to Forecasted Transmission flow paths CNSW-SNW Option 1,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +N11,Illawarra Coast,Option 1,Refer to N12 forecasted costs,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +N12,Illawarra,Option 1,813815319.22,817811736.6265252,832369006.0703616,848481785.9113315,860767680.5083158,866382557.1392083,872976740.2219964,879280385.8855536,885371634.1700364,892369728.4992911,894675154.5802765,897022882.0572433,899412910.9301914,901845241.199121,904309297.5150366,906826230.5759289,909385465.0328026,911987000.8856577,914630838.1344941,917242949.3363446,919865635.8871903,922488322.4380361,925100433.6398866,927723120.1907322,930345806.7415781,932968493.2924238,935591179.8432697,938213866.3941154,940836552.9449612,943459239.4958069,946081926.0466528,948704612.5974984,951327299.1483443 diff --git a/tests/test_workbook_table_cache/rez_augmentation_options_NSW.csv b/tests/test_workbook_table_cache/rez_augmentation_options_NSW.csv new file mode 100644 index 0000000..96799bc --- /dev/null +++ 
b/tests/test_workbook_table_cache/rez_augmentation_options_NSW.csv
@@ -0,0 +1,30 @@
+REZ / Constraint ID,REZ Name,Option,Description,Additional network capacity (MW),Expected cost ($ million),Estimate source,Cost estimate class,$M/MW,Easement Length (km),Lead time,System strength connection costs ($M/MW)
+N1,North West NSW,Option 1,"• Two new 500 kV circuits from Orana REZ to locality of Gilgandra to locality of Boggabri to locality of Moree. • A new single 500 kV circuit from Orana REZ to Wollar. • New 500/330 kV substations in locality of Boggabri and Moree. • A new 500 kV switching station in locality of Gilgandra. • A new 330 kV single-circuit from Sapphire to locality of Moree. • A new 330 kV circuit from Tamworth to locality of Boggabri. • Line shunt reactors at both ends of Orana REZ-locality of Gilgandra, locality of Gilgandra-locality of Boggabri, locality of Boggabri-locality of Moree 500 kV circuits.",1660.0,4684,AEMO TCD,Class 5b,2.8216867469879516,810.0,Long,0.137
+N2,New England,Option 1,"• New 330 kV Northern (Hub 10), Central South (Hub 1) and East (Hub 4) switching stations. • New 500 kV built and initially 330 kV operated DCST line from Central (Hub 5) to Central South (Hub1) with Quad Orange conductor. • New 500 kV built and initially 330 kV operated DCST line from Central (Hub 5) to Northern (Hub 10) with Quad Orange conductor. • New 330 kV DCST line between Central (Hub 5) and East (Hub 4) with Twin Olive conductor.",1000.0,370,AEMO TCD,Class 5b,0.37,60.0,Medium,0.137
+N2,New England,Option 2,"• New North switching station and cuts into Sapphire - Armidale and Dumaresq - Armidale line. • New 500 kV built and initially 330 kV operated double-circuit line from North switching station to Hub 5. • Augment Hub 5 with one additional 500/330 kV transformer. • New 500 kV double-circuit line, strung on one side between Hub 5 to Hub 1. • New 330 kV DCST line from Hub 8 to Hub 5. • New Hub 8 switching station. (Pre-requisite: CNSW-NNSW Option 3)",1500.0,1004,AEMO TCD,Class 5b,0.6693333333333333,140.0,Long,
+N2,New England,Option 3,• New Hub 9 switching station. • Establish a new Lower Creek 330/132 kV substation with 1 x 330/132 kV 375 MVA transformer. • Rebuild part of Line 965 as 330 kV double-circuit from Armidale to Lower Creek. • Relocate existing 132 kV 200 MVA phase shift transformer on Line 965 from Armidale to Lower Creek. • New 330 kV double-circuit from Lower Creek to Hub 9. • Cut-in of Line 965 at new Lower Creek substation,900.0,647,AEMO TCD,Class 5b,0.7188888888888889,20.0,Medium,
+N3,Central-West Orana,Central West Orana REZ transmission link,"• New Merotherie 500/330 kV substation with 4 x 500/330/33 kV 1500 MVA transformers. • New 330 kV Uarbry East, Uarbry West, Elong Elong switching stations. • New 500 kV Wollar switching station. • 2 x 500 kV double-circuit line from Wollar to Merotherie. • 330 kV double-circuit line from Merotherie to Uarbry East. • 330 kV double-circuit from Merotherie to Uarbry West. • 2 x 500 kV double-circuit and initially operated at 330 kV from Merotherie to Elong Elong. • 5 x 100 MVAr synchronous condensers at Elong Elong switching station. • 5 x 100 MVAr synchronous condensers at Merotherie substation. • Provision of switchbays for future generator connections. • An additional 330 kV single-circuit line from Bayswater to Liddell. • An additional 330 kV single-circuit line from Mt Piper to Wallerawang. • New 330 kV Uungula switching station and cut into Line 79 Wellington – Wollar • 1 x 330 kV DCST from Elong Elong to Uungula with Twin Olive conductor • 2 x 100 MVAr synchronous condensers at Uarbry West switching station • 3 x 100 MVAr synchronous condensers at Uarbry East switching station Note: Hunter Transmission Project is a pre-requisite for allowing network transfer greater than 3 GW.",4500.0,This project is considered to be anticipated and so is not included as an option here. The scope of the project is listed here for context so that the subsequent options can be understood. Option 1 includes expansions and augmentations to this project.,,,,,,Included as part of network build
+N3,Central-West Orana,Option 1,• Expand Elong Elong substation with 3 x 500/330/33 kV 1500MVA transformers • Operate 4 circuits between Elong Elong and Merotherie to 500 kV (Pre-requisite: CWO REZ transmission link project) Note: Hunter Transmission Project will be required to get up to 6 GW total network capacity as pre-requisite. Note: Hunter Transmission Project is a pre-requisite for allowing network transfer greater than 3 GW. Note: 3 x 1500 MVA transformers provide for 3 GW at Elong Elong however REZ network capacity is limited to 6 GW,1500.0,243,AEMO TCD,Class 5b,0.162,,Medium,
+N3,Central-West Orana,Option 2,• New 330 kV Stubbo switching station and cuts into Wellington - Wollar • New 330 kV single-circuit line between Wollar and Stubbo • Expand Wollar substation with 330 kV busbar and 1 x 500/300/33 kV 1500 MVA transformer,500.0,330,AEMO TCD,Class 5b,0.66,55.0,Medium,
+N3,Central-West Orana,Option 3,• New 330 kV Burrendong switching station and cuts into Line Wellington - Mt Piper • New Uungula switching station and cuts into Wollar - Wellington • New 330 kV double-circuit line from Burrendong switching station to Uungula,500.0,273,AEMO TCD,Class 5b,0.546,45.0,Medium,
+N4,Broken Hill,Option 1,• 500 kV double-circuit line from Bannaby – Broken Hill (>850 km). • Two mid-point switching stations and reactive plant.,1750.0,5098,AEMO TCD,Class 5b,2.9131428571428573,849.0,Long,0.137
+N4,Broken Hill,Option 2,500 kV double-circuit HVDC line from Bannaby – Broken Hill (>850 km). • New HVDC converter stationss at Bannaby and Broken Hill,1750.0,4576,AEMO TCD,Class 5b,2.6148571428571428,850.0,Long,
+N5,South West NSW,Option 1,"• Expand Dinawan 330 kV switching station to 500/330 kV substation with 3 x 500/330/33 kV, 1500 MVA transformers • Operate 500 kV build and 330 kV operated double-circuit line from Dinawan to Wagga to 500 kV (Pre-requisite: EnergyConnect and HumeLink)",2500.0,1418,AEMO TCD,Class 5b,0.5672,0.0,Long,0.137
+N5,South West NSW,Option 2,• New Conargo 330 kV switching station • New 330 kV double-circuit line from Conargo to Dinawan (Pre-requisite: Dinawan - Wagga 500 kV upgrade),800.0,383,AEMO TCD,Class 5b,0.47875,69.0,Long,
+N5,South West NSW,Option 3,• New Marbins Well 330 kV switching station • New 330 kV DCST line from Mabins Well to Dinawan (Pre-requisite: Dinawan - Wagga 500 kV upgrade),1400.0,300,AEMO TCD,Class 5b,0.21428571428571427,50.0,Long,
+N5,South West NSW,Option 4,• New The Plains 330 kV switching station • New 330 kV double-circuit line and strung on one side from The Plains to Dinawan (Pre-requisite: South West REZ Option 1),1400.0,417,AEMO TCD,Class 5b,0.2978571428571429,88.0,Long,
+N5,South West NSW,Option 5,• New Hays Plain 330 kV switching station • New Abercrombie 330 kV switching station • New 330 kV double-circuit line from Hays Plain to Abercrombie • New 330 kV double-circuit line from Abercrombie to The Plain • String the other side of 330 kV line from The Plain to Dinawan (Pre-requisite: South West REZ Option 4),1400.0,1047,AEMO TCD,Class 5b,0.7478571428571429,280.0,Long,
+SWNSW1,Secondary Transmission Limit - South West NSW,Option 1,"• Establish a new Darlington Point to Dinawan 330 kV transmission line, post Project EnergyConnect (Pre-requisite: Project EnergyConnect and HumeLink)",600.0,167,Transgrid,Class 5a,0.2783333333333333,90.0,Short,
+N6,Wagga Wagga,Option 1,Refer to SNSW-CNSW Option 3 and 4 subregional augmentations,,,,,,,,0.137
+N7,Tumut,Option 1,Refer to SNSW-CNSW Option 1 subregional augmentations,,,,,,,,Included as connection cost
+N8,Cooma-Monaro,Option 1,• 132 kV single-circuit Williamsdale to Cooma-Monaro substation (located near generation interest),150.0,202,AEMO TCD,Class 5b,1.3466666666666667,81.0,Medium,0.137
+N8,Cooma-Monaro,Option 2,• 330 kV line Cooma-Williamdale-Stockdill • Two 330/132 kV transformers at Cooma,500.0,512,AEMO TCD,Class 5b,1.024,126.0,Medium,
+N9,Hunter-Central Coast,Option 1,• Rebuild the existing Line 83 Liddell - Muswellbrook as 330 kV double-circuit line • 1 x 330 kV double-circuit from East Hub to Muswellbrook • 1 x 330 kV double-circuit from West Hub to Muswellbrook,950.0,307,AEMO TCD,Class 5b,0.3231578947368421,39.0,Medium,0.137
+N9,Hunter-Central Coast,Option 1A,"• Install a new 330 kV circuit between Liddell and Muswellbrook, Twin Olive conductor • 1 x 330 kV DCST from East Hub to Muswellbrook conductor • 1 x 330 kV DCST from West Hub to Muswellbrook",950.0,283,AEMO TCD,Class 5b,0.29789473684210527,57.0,Medium,
+N9,Hunter-Central Coast,Option 1AB,• Install a new 330 kV circuit between Liddell and Muswellbrook • 1 x 330 kV DCST from East Muswellbrook Hub to Muswellbrook • Build 330/132 kV 375 MVA transformer at West Muswellbrook Hub • 1 x 132 kV DCST from West Muswellbrook Hub to Muswellbrook,850.0,274,AEMO TCD,Class 5b,0.32235294117647056,57.0,Medium,
+N9,Hunter-Central Coast,Option 1B,• Rebuild the existing Line 83 Liddell - Muswellbrook as 330 kV double-circuit line • 1 x 330 kV DCST from East Muswellbrook Hub to Muswellbrook • Build 330/132 kV 375MVA transformer at West Muswellbrook Hub • 1 x 132 kV DCST from West Muswellbrook Hub to Muswellbrook,850.0,298,AEMO TCD,Class 5b,0.35058823529411764,39.0,Medium,
+N9,Hunter-Central Coast,Option 2,• New 330 kV Singleton switching station and cuts into line 82 Liddell - Tomago,500.0,59,AEMO TCD,Class 5b,0.118,,Short,
+N9,Hunter-Central Coast,Option 2A,• New 330/132 kV 375 MVA Singleton two transformer substation and cuts into line 82 Liddell - Tomago and connected to Ausgrid's Singleton 132 kV substation switching station,375.0,106,AEMO TCD,Class 5b,0.2826666666666667,,Medium,
+N10,Hunter Coast,Option 1,Refer to CNSW-SNW Option 1 subregional augmentations,,,,,,,,0.137
+N11,Illawarra Coast,Option 1,• 500 kV double-circuit line from Dapto – Bannaby. • Two 500/330 kV 1500 MVA transformers at Dapto. (Pre-requisite: CNSW – SNW Option 2),2000.0,814,AEMO TCD,Class 5b,0.407,100.0,Long,0.137
+N12,Illawarra,Option 1,• 500 kV double-circuit line from Dapto – Bannaby. • Two 500/330 kV 1500 MVA transformers at Dapto. (Pre-requisite: CNSW – SNW Option 2),2000.0,814,AEMO TCD,Class 5b,0.407,100.0,Long,0.137
diff --git a/tests/test_workbook_table_cache/transmission_expansion_costs.csv b/tests/test_workbook_table_cache/transmission_expansion_costs.csv
index 72fa8de..e4bd085 100644
--- a/tests/test_workbook_table_cache/transmission_expansion_costs.csv
+++ b/tests/test_workbook_table_cache/transmission_expansion_costs.csv
@@ -1,4 +1,4 @@
-,flow_path_name,indicative_transmission_expansion_cost_$/mw
+,flow_path,indicative_transmission_expansion_cost_$/mw
 0,CQ-NQ,1.126363636
 1,CQ-GG,0.838709677
 2,SQ-CQ,0.513333333
diff --git a/uv.lock b/uv.lock
index 7dd8b93..e0da163 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1,4 +1,5 @@
 version = 1
+revision = 1
 requires-python = ">=3.10"
 resolution-markers = [
     "python_full_version < '3.11'",
@@ -360,7 +361,7 @@ name = "click"
 version = "8.1.7"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
-    { name = "colorama", marker = "platform_system == 'Windows'" },
+    { name = "colorama", marker = "sys_platform == 'win32'" },
 ]
 sdist = { url = "https://files.pythonhosted.org/packages/96/d3/f04c7bfcf5c1862a2a5b845c6b2b360488cf47af55dfa79c98f6a6bf98b5/click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de", size = 336121 }
 wheels = [
@@ -917,7 +918,7 @@ name = "ipykernel"
 version = "6.29.5"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
-    { name = "appnope", marker = "platform_system == 'Darwin'" },
+    { name = "appnope", marker = "sys_platform == 'darwin'" },
     { name = "comm" },
     { name = "debugpy" },
     { name = "ipython" },
@@ -959,7 +960,7 @@ wheels = [
 
 [[package]]
 name = "isp-trace-parser"
-version = "1.0.1"
+version = "1.0.3"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "joblib" },
@@ -967,14 +968,14 @@ dependencies = [
     { name = "polars" },
     { name = "pydantic" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/7c/ee/bb1766136177990809f922b31d157aa64276de2bd39d52dc5396d03869d1/isp_trace_parser-1.0.1.tar.gz", hash = "sha256:24b2bfea350bc7d9cf914cf37aca94ae2ae8d6a30e4c827bf2421f83440613c7", size = 47637 }
+sdist = { url = "https://files.pythonhosted.org/packages/47/e7/31602674cebf1b951f113d0c185a00499a1d387ffdda4cf6e2e5a34c2004/isp_trace_parser-1.0.3.tar.gz", hash = "sha256:8862ac7bfe41ac7d3b7ac05e5f4f9fb9ce21689e48b5c73a1ca4b8f4b503f395", size = 47948 }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/32/db/5275985f5305975443f58363307a0283b3549fdeec53b99827148d803f6a/isp_trace_parser-1.0.1-py3-none-any.whl", hash = "sha256:f7c59e0b449017454e9821d87ca80393e8599a9072dc1480a7d0fea93581ead3", size = 44849 },
+    { url = "https://files.pythonhosted.org/packages/30/5a/74b90f1aaeec4304e9a80d310fb2ba247b960489b84463dee45f18fbfc8d/isp_trace_parser-1.0.3-py3-none-any.whl", hash = "sha256:a17bebd07edf6edd9c31b27dff361dfc64efbfd44d0f9e61c8642089ba461b1a", size = 45111 },
 ]
 
 [[package]]
 name = "isp-workbook-parser"
-version = "2.4.1"
+version = "2.6.0"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "openpyxl" },
@@ -983,9 +984,9 @@ dependencies = [
     { name = "pyyaml" },
     { name = "thefuzz" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/b4/b3/c82c654fdfa843b00e08609492204d68435a125138e8294ebaf51d5ef245/isp_workbook_parser-2.4.1.tar.gz", hash = "sha256:61d6577d90ff804ef662368d77a1c1978ab7456aae863f71478c55f20f1621d6", size = 52394 }
+sdist = { url = "https://files.pythonhosted.org/packages/da/d3/c33976dccb3ba135f785d685d26d9a7c4479377e3d584afd628915bba96c/isp_workbook_parser-2.6.0.tar.gz", hash = "sha256:e35a756311f39e8b7327b8fd2e30e367f41400ac710106a1b42de383ade3cb1c", size = 53856 }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/8e/0a/ce602e2f1a45dc45bcb266c43fcf9901eaeeb182f24cb15e3ff2774b0f0f/isp_workbook_parser-2.4.1-py3-none-any.whl", hash = "sha256:65731de03f29af97ead30a903d0fa3e45a5e9862358c82ea21e6ed807ea444df", size = 84820 },
+    { url = "https://files.pythonhosted.org/packages/2f/96/d30d8e0e633adf8376310e71f11309169b44271142219a0e289e306194f0/isp_workbook_parser-2.6.0-py3-none-any.whl", hash = "sha256:d20ab3c442aa0293019810b9edb7aaad86e1c12768bfb506a8ae98133f503324", size = 86466 },
 ]
 
 [[package]]
@@ -1031,8 +1032,8 @@ dev = [
 [package.metadata]
 requires-dist = [
     { name = "doit", specifier = ">=0.36.0" },
-    { name = "isp-trace-parser", specifier = ">=1.0.0" },
-    { name = "isp-workbook-parser", specifier = ">=2.4.1" },
+    { name = "isp-trace-parser", specifier = ">=1.0.3" },
+    { name = "isp-workbook-parser", specifier = ">=2.6.0" },
     { name = "linopy", marker = "extra == 'solvers'", specifier = ">=0.4.4" },
     { name = "pandas", specifier = ">=2.2.2" },
     { name = "pyarrow", specifier = ">=18.0.0" },
@@ -1042,6 +1043,7 @@ requires-dist = [
     { name = "thefuzz", specifier = ">=0.22.1" },
     { name = "xmltodict", specifier = ">=0.13.0" },
 ]
+provides-extras = ["solvers"]
 
 [package.metadata.requires-dev]
 dev = [
@@ -3093,7 +3095,7 @@ name = "tqdm"
 version = "4.67.1"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
-    { name = "colorama", marker = "platform_system == 'Windows'" },
+    { name = "colorama", marker = "sys_platform == 'win32'" },
 ]
 sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737 }
 wheels = [