diff --git a/pyomo/contrib/doe/__init__.py b/pyomo/contrib/doe/__init__.py
index 0295d6eed79..0808d449432 100644
--- a/pyomo/contrib/doe/__init__.py
+++ b/pyomo/contrib/doe/__init__.py
@@ -8,8 +8,8 @@
 # rights in this software.
 # This software is distributed under the 3-clause BSD License.
 # ___________________________________________________________________________
-from .doe import DesignOfExperiments, ObjectiveLib, FiniteDifferenceStep
-from .utils import rescale_FIM
+from .doe import DesignOfExperiments, ObjectiveLib, GradientMethod
+from .utils import rescale_FIM, ExperimentGradients
 
 # Deprecation errors for old Pyomo.DoE interface classes and structures
 from pyomo.common.deprecation import deprecated
@@ -45,3 +45,10 @@ def __init__(self, *args):
 class ModelOptionLib:
     def __init__(self, *args):
         raise RuntimeError(deprecation_message)
+
+@deprecated(
+    "Use of FiniteDifferenceStep in Pyomo.DoE is no longer supported. Use GradientMethod instead.", version='6.9.4'
+)
+class FiniteDifferenceStep:
+    def __init__(self, *args):
+        raise RuntimeError(deprecation_message)
\ No newline at end of file
diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py
index 98319708476..df9e7ac160b 100644
--- a/pyomo/contrib/doe/doe.py
+++ b/pyomo/contrib/doe/doe.py
@@ -48,6 +48,8 @@
 
 from pyomo.opt import SolverStatus
 
+from pyomo.contrib.doe.utils import ExperimentGradients
+
 # This small and positive tolerance is used when checking
 # if the prior is negative definite or approximately
 # indefinite. It is defined as a tolerance here to ensure
@@ -69,16 +71,18 @@ class ObjectiveLib(Enum):
     zero = "zero"
 
 
-class FiniteDifferenceStep(Enum):
+class GradientMethod(Enum):
     forward = "forward"
     central = "central"
     backward = "backward"
+    symbolic = "symbolic"
 
 
 class DesignOfExperiments:
     def __init__(
         self,
         experiment=None,
+        gradient_method=None,
         fd_formula="central",
         step=1e-3,
         objective_option="determinant",
@@ -108,9 +112,15 @@ def __init__(
             Experiment object that holds the model and labels all the components. The object
             should have a ``get_labeled_model`` where a model is returned with the following
             labeled sets: ``unknown_parameters``, ``experimental_inputs``, ``experimental_outputs``
+        gradient_method:
+            Method to use for computing the gradient of the model. Must be one of
+            [``forward``, ``central``, ``backward``, ``symbolic``, None], default: None.
+            If None, falls back to the legacy ``fd_formula`` argument.
         fd_formula:
             Finite difference formula for computing the sensitivity matrix. Must be one of
             [``central``, ``forward``, ``backward``], default: ``central``
+            This option will be deprecated in the future, and the user should
+            use the ``gradient_method`` option instead.
         step:
             Relative step size for the finite difference formula. default: 1e-3
@@ -170,8 +180,21 @@ def __init__(
         # Set the experiment object from the user
         self.experiment = experiment
 
-        # Set the finite difference and subsequent step size
-        self.fd_formula = FiniteDifferenceStep(fd_formula)
+        # Set the gradient method
+        if gradient_method is None:
+
+            # Throw a deprecation warning
+            # TODO: Need to add a deprecation warning here
+
+            self._gradient_method = GradientMethod(fd_formula)
+        else:
+            self._gradient_method = GradientMethod(gradient_method)
+
+        if self._gradient_method is not GradientMethod.symbolic:
+            if step is None:
+                raise ValueError(
+                    "If the gradient method is not symbolic, a step size must be provided."
+                )
 
         self.step = step
 
         # Set the objective type and scaling options:
@@ -262,7 +285,7 @@ def run_doe(self, model=None, results_file=None):
             # model.add_component(doe_block_name, doe_block)
             pass
 
-        # ToDo: potentially work with this for more complicated models
+        # TODO: potentially work with this for more complicated models
         # Create the full DoE model (build scenarios for F.D. scheme)
         if not self._built_scenarios:
             self.create_doe_model(model=model)
@@ -416,7 +439,7 @@ def run_doe(self, model=None, results_file=None):
         self.results["Wall-clock Time"] = build_time + initialization_time + solve_time
 
         # Settings used to generate the optimal DoE
-        self.results["Finite Difference Scheme"] = str(self.fd_formula).split(".")[-1]
+        self.results["Gradient Method"] = str(self._gradient_method).split(".")[-1]
         self.results["Finite Difference Step"] = self.step
 
         self.results["Nominal Parameter Scaling"] = self.scale_nominal_param_value
@@ -485,12 +508,17 @@ def compute_FIM(self, model=None, method="sequential"):
 
         # TODO: Add a check to see if the model has an objective and deactivate it.
         # This solve should only be a square solve without any obj function.
-        if method == "sequential":
-            self._sequential_FIM(model=model)
-            self._computed_FIM = self.seq_FIM
-        elif method == "kaug":
+
+        if method == "kaug" or self._gradient_method == GradientMethod.symbolic:
+
+            # The calculations with kaug and symbolic gradients are similar.
+            # This function skips using kaug if the gradient method is symbolic.
+
             self._kaug_FIM(model=model)
             self._computed_FIM = self.kaug_FIM
+        elif method == "sequential":
+            self._sequential_FIM(model=model)
+            self._computed_FIM = self.seq_FIM
         else:
             raise ValueError(
                 "The method provided, {}, must be either `sequential` or `kaug`".format(
@@ -522,7 +550,7 @@ def _sequential_FIM(self, model=None):
         model.parameter_scenarios = pyo.Suffix(direction=pyo.Suffix.LOCAL)
 
         # Populate parameter scenarios, and scenario inds based on finite difference scheme
-        if self.fd_formula == FiniteDifferenceStep.central:
+        if self._gradient_method == GradientMethod.central:
             model.parameter_scenarios.update(
                 (2 * ind, k) for ind, k in enumerate(model.unknown_parameters.keys())
             )
@@ -531,9 +559,9 @@ def _sequential_FIM(self, model=None):
                 for ind, k in enumerate(model.unknown_parameters.keys())
             )
             model.scenarios = range(len(model.unknown_parameters) * 2)
-        elif self.fd_formula in [
-            FiniteDifferenceStep.forward,
-            FiniteDifferenceStep.backward,
+        elif self._gradient_method in [
+            GradientMethod.forward,
+            GradientMethod.backward,
         ]:
             model.parameter_scenarios.update(
                 (ind + 1, k) for ind, k in enumerate(model.unknown_parameters.keys())
@@ -541,7 +569,7 @@ def _sequential_FIM(self, model=None):
             model.scenarios = range(len(model.unknown_parameters) + 1)
         else:
             raise AttributeError(
-                "Finite difference option not recognized. Please contact the developers as you should not see this error."
+                "Gradient method option not recognized. Please contact the developers as you should not see this error."
             )
 
         # Fix design variables
@@ -553,21 +581,21 @@ def _sequential_FIM(self, model=None):
         # Calculate measurement values for each scenario
         for s in model.scenarios:
             # Perturbation to be (1 + diff) * param_value
-            if self.fd_formula == FiniteDifferenceStep.central:
+            if self._gradient_method == GradientMethod.central:
                 diff = self.step * (
                     (-1) ** s
                 )  # Positive perturbation, even; negative, odd
-            elif self.fd_formula == FiniteDifferenceStep.backward:
+            elif self._gradient_method == GradientMethod.backward:
                 diff = (
                     self.step * -1 * (s != 0)
                 )  # Backward always negative perturbation; 0 at s = 0
-            elif self.fd_formula == FiniteDifferenceStep.forward:
+            elif self._gradient_method == GradientMethod.forward:
                 diff = self.step * (s != 0)  # Forward always positive; 0 at s = 0
 
             # If we are doing forward/backward, no change for s=0
             skip_param_update = (
-                self.fd_formula
-                in [FiniteDifferenceStep.forward, FiniteDifferenceStep.backward]
+                self._gradient_method
+                in [GradientMethod.forward, GradientMethod.backward]
             ) and (s == 0)
             if not skip_param_update:
                 param = model.parameter_scenarios[s]
@@ -610,17 +638,19 @@ def _sequential_FIM(self, model=None):
 
         # Loop over parameter values and grab correct columns for finite difference calculation
 
+        # TODO: Does this assume the nominal parameter value is stored in the suffix? What if the suffix
+        # contains None?
         for k, v in model.unknown_parameters.items():
             curr_step = v * self.step
 
-            if self.fd_formula == FiniteDifferenceStep.central:
+            if self._gradient_method == GradientMethod.central:
                 col_1 = 2 * i
                 col_2 = 2 * i + 1
                 curr_step *= 2
-            elif self.fd_formula == FiniteDifferenceStep.forward:
+            elif self._gradient_method == GradientMethod.forward:
                 col_1 = i
                 col_2 = 0
-            elif self.fd_formula == FiniteDifferenceStep.backward:
+            elif self._gradient_method == GradientMethod.backward:
                 col_1 = 0
                 col_2 = i
@@ -651,8 +681,8 @@ def _sequential_FIM(self, model=None):
     # Use kaug to get FIM
     def _kaug_FIM(self, model=None):
         """
-        Used to compute the FIM using kaug, a sensitivity-based
-        approach that directly computes the FIM.
+        Used to compute the FIM using symbolic/automatic differentiation
+        implemented in kaug or PyNumero.
 
         Parameters
         ----------
@@ -677,50 +707,88 @@ def _kaug_FIM(self, model=None):
 
         self.solver.solve(model, tee=self.tee)
 
-        # Probe the solved model for dsdp results (sensitivities s.t. parameters)
-        params_dict = {k.name: v for k, v in model.unknown_parameters.items()}
-        params_names = list(params_dict.keys())
+        if self._gradient_method == GradientMethod.symbolic:
+
+            experiment_grad = ExperimentGradients(model, automatic=True, symbolic=False)
 
-        dsdp_re, col = get_dsdp(model, params_names, params_dict, tee=self.tee)
+            print("AD gradient:\n")
+            df = experiment_grad.get_numeric_sensitivity_as_df()
+            print(df)
 
-        # analyze result
-        dsdp_array = dsdp_re.toarray().T
+            print("\nExperiment outputs:")
+            for o in model.experiment_outputs:
+                o.pprint()
 
-        # store dsdp returned
-        dsdp_extract = []
-        # get right lines from results
-        measurement_index = []
+            print("\n Experiment inputs:")
+            for i in model.experiment_inputs:
+                i.pprint()
 
-        # loop over measurement variables and their time points
-        for k, v in model.experiment_outputs.items():
-            name = k.name
-            try:
-                kaug_no = col.index(name)
-                measurement_index.append(kaug_no)
-                # get right line of dsdp
-                dsdp_extract.append(dsdp_array[kaug_no])
-            except:
-                # k_aug does not provide value for fixed variables
-                self.logger.debug("The variable is fixed: %s", name)
-                # produce the sensitivity for fixed variables
-                zero_sens = np.zeros(len(params_names))
-                # for fixed variables, the sensitivity are a zero vector
-                dsdp_extract.append(zero_sens)
-
-        # Extract and calculate sensitivity if scaled by constants or parameters.
-        jac = [[] for k in params_names]
-
-        for d in range(len(dsdp_extract)):
-            for k, v in model.unknown_parameters.items():
-                p = params_names.index(k.name)  # Index of parameter in np array
-                # if scaled by parameter value or constant value
-                sensi = dsdp_extract[d][p] * self.scale_constant_value
-                if self.scale_nominal_param_value:
-                    sensi *= v
-                jac[p].append(sensi)
+            # Transpose is not needed here, as the Jacobian is already in the right shape
+            # (measurements x parameters)
+            self.kaug_jac = experiment_grad.compute_gradient_outputs_wrt_unknown_parameters()
 
-        # record kaug jacobian
-        self.kaug_jac = np.array(jac).T
+            if self.scale_nominal_param_value:
+                # Scale the sensitivities by the nominal parameter values
+                '''
+                for i, p in enumerate(model.unknown_parameters.keys()):
+                    # Scale by the nominal parameter value taken from the Pyomo model
+                    self.kaug_jac[:, i] *= pyo.value(p)
+                '''
+
+                for i, (k, v) in enumerate(model.unknown_parameters.items()):
+                    # Scale by the nominal parameter value taken from the suffix (?)
+                    self.kaug_jac[:, i] *= v
+
+            # Scale the sensitivities by the constant value
+            if self.scale_constant_value:
+                # Skip scaling if the value is None
+                self.kaug_jac *= self.scale_constant_value
+
+        else:
+            # Probe the solved model for dsdp results (sensitivities s.t. parameters)
+            params_dict = {k.name: v for k, v in model.unknown_parameters.items()}
+            params_names = list(params_dict.keys())
+
+            dsdp_re, col = get_dsdp(model, params_names, params_dict, tee=self.tee)
+
+            # analyze result
+            dsdp_array = dsdp_re.toarray().T
+
+            # store dsdp returned
+            dsdp_extract = []
+            # get right lines from results
+            measurement_index = []
+
+            # loop over measurement variables and their time points
+            for k, v in model.experiment_outputs.items():
+                name = k.name
+                try:
+                    kaug_no = col.index(name)
+                    measurement_index.append(kaug_no)
+                    # get right line of dsdp
+                    dsdp_extract.append(dsdp_array[kaug_no])
+                except:
+                    # k_aug does not provide value for fixed variables
+                    self.logger.debug("The variable is fixed: %s", name)
+                    # produce the sensitivity for fixed variables
+                    zero_sens = np.zeros(len(params_names))
+                    # for fixed variables, the sensitivities are a zero vector
+                    dsdp_extract.append(zero_sens)
+
+            # Extract and calculate sensitivity if scaled by constants or parameters.
+            jac = [[] for k in params_names]
+
+            for d in range(len(dsdp_extract)):
+                for k, v in model.unknown_parameters.items():
+                    p = params_names.index(k.name)  # Index of parameter in np array
+                    # if scaled by parameter value or constant value
+                    sensi = dsdp_extract[d][p] * self.scale_constant_value
+                    if self.scale_nominal_param_value:
+                        sensi *= v
+                    jac[p].append(sensi)
+
+            # record kaug jacobian
+            self.kaug_jac = np.array(jac).T
 
         # Compute FIM
         if self.prior_FIM is None:
@@ -736,12 +804,21 @@ def _kaug_FIM(self, model=None):
                 cov_y[count, count] = 1 / v
                 count += 1
 
-        # ToDo: need to add a covariance matrix for measurements (sigma inverse)
+        # TODO: need to add a covariance matrix for measurements (sigma inverse)
         # i.e., cov_y = self.cov_y or model.cov_y
         # Still deciding where this would be best.
 
+        print("Dimensions of kaug_jac = ", self.kaug_jac.shape)
+        print("Dimensions of cov_y", cov_y.shape)
+        print("Dimensions of prior_FIM", self.prior_FIM.shape)
+
         self.kaug_FIM = self.kaug_jac.T @ cov_y @ self.kaug_jac + self.prior_FIM
 
+        print("kaug_jac\n:", self.kaug_jac)
+        print("\n\ncov_y\n:", cov_y)
+        print("\n\nkaug_FIM\n:", self.kaug_FIM)
+        print("\n\nprior_FIM\n:", self.prior_FIM)
+
     # Create the DoE model (with ``scenarios`` from finite differencing scheme)
     def create_doe_model(self, model=None):
        """
@@ -779,9 +856,13 @@ def create_doe_model(self, model=None):
                 "Cannot compute determinant with explicit formula if only_compute_fim_lower is True."
             )
 
-        # Generate scenarios for finite difference formulae
+        # Generate scenarios for computing the gradients/sensitivities
         self._generate_scenario_blocks(model=model)
 
+        # TODO: Is this indexing code correct for symbolic gradients if we keep
+        # "scenario_blocks[0]"?
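For orientation, the `_kaug_FIM` assembly above computes the FIM as J^T Sigma^{-1} J + prior, where J is the measurement-by-parameter sensitivity matrix and Sigma is the (here diagonal) measurement covariance. A minimal standalone numpy sketch of that assembly, with hypothetical numbers that are not part of this patch:

```python
import numpy as np

# Hypothetical sensitivity matrix J: 3 measurements x 2 parameters
jac = np.array([[1.0, 0.5], [0.2, 1.5], [0.8, 0.3]])

# Independent measurement errors with variances v -> Sigma^{-1} = diag(1/v),
# mirroring the cov_y[count, count] = 1 / v loop above
variances = np.array([1.0, 2.0, 0.5])
cov_y = np.diag(1.0 / variances)

# No prior information in this sketch
prior_FIM = np.zeros((2, 2))

# Same assembly as self.kaug_FIM = self.kaug_jac.T @ cov_y @ self.kaug_jac + self.prior_FIM
FIM = jac.T @ cov_y @ jac + prior_FIM
print(FIM)
```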
+
+        # Set names for indexing sensitivity matrix (jacobian) and FIM
         scen_block_ind = min(
             [
@@ -808,6 +889,15 @@ def identity_matrix(m, i, j):
             else:
                 return 0
 
+        # TODO: Replace this Jacobian initialization with automatic differentiation
+        # Let's work on this AFTER the symbolic differentiation is implemented and tested
+
+        # Create object for computing gradients
+        experiment_grad = ExperimentGradients(
+            model.scenario_blocks[0],  # Always analyze scenario 0
+            automatic=True,
+            symbolic=self._gradient_method == GradientMethod.symbolic,
+        )
+
         ### Initialize the Jacobian if provided by the user
 
         # If the user provides an initial Jacobian, convert it to a dictionary
@@ -833,6 +923,9 @@ def initialize_jac(m, i, j):
             model.output_names, model.parameter_names, initialize=initialize_jac
         )
 
+        # TODO: We can initialize the FIM more robustly once we initialize the Jacobian
+        # using automatic differentiation.
+
         # Initialize the FIM
         if self.fim_initial is not None:
             dict_fim_initialize = {
@@ -853,7 +946,7 @@ def initialize_fim(m, j, d):
             model.parameter_names, model.parameter_names, initialize=identity_matrix
         )
 
-        # To-Do: Look into this functionality.....
+        # TODO: Look into this functionality.....
         # if cholesky, define L elements as variables
         if self.Cholesky_option and self.objective_option == ObjectiveLib.determinant:
             model.L = pyo.Var(
@@ -871,50 +964,121 @@ def initialize_fim(m, j, d):
                 if c == d:
                     model.L[c, d].setlb(self.L_diagonal_lower_bound)
 
-        # jacobian rule
-        def jacobian_rule(m, n, p):
-            """
-            m: Pyomo model
-            n: experimental output
-            p: unknown parameter
-            """
-            fd_step_mult = 1
-            cuid = pyo.ComponentUID(n)
-            param_ind = m.parameter_names.data().index(p)
-
-            # Different FD schemes lead to different scenarios for the computation
-            if self.fd_formula == FiniteDifferenceStep.central:
-                s1 = param_ind * 2
-                s2 = param_ind * 2 + 1
-                fd_step_mult = 2
-            elif self.fd_formula == FiniteDifferenceStep.forward:
-                s1 = param_ind + 1
-                s2 = 0
-            elif self.fd_formula == FiniteDifferenceStep.backward:
-                s1 = 0
-                s2 = param_ind + 1
-
-            var_up = cuid.find_component_on(m.scenario_blocks[s1])
-            var_lo = cuid.find_component_on(m.scenario_blocks[s2])
-
-            param = m.parameter_scenarios[max(s1, s2)]
-            param_loc = pyo.ComponentUID(param).find_component_on(m.scenario_blocks[0])
-            param_val = m.scenario_blocks[0].unknown_parameters[param_loc]
-            param_diff = param_val * fd_step_mult * self.step
-
-            if self.scale_nominal_param_value:
-                return (
-                    m.sensitivity_jacobian[n, p]
-                    == (var_up - var_lo)
-                    / param_diff
-                    * param_val
-                    * self.scale_constant_value
-                )
-            else:
-                return (
-                    m.sensitivity_jacobian[n, p]
-                    == (var_up - var_lo) / param_diff * self.scale_constant_value
-                )
+        # TODO: This code changes for symbolic gradients
+
+        if self._gradient_method == GradientMethod.symbolic:
+            # print("TODO: Need to implement symbolic gradients for the FIM computation.")
+            # pass
+
+            # Notes for next steps:
+            # 1. Add variables for all elements in the Jacobian matrix (all variables)
+            # 2. Add constraints to compute the Jacobian elements
+            # 3. Add expressions to link the computed Jacobian elements using symbolic differentiation
+            #    with the model. Need to be careful about the indexing of the variables
+
+            # Add constraints to compute the Jacobian elements
+            # These constraints are added on scenario_blocks[0]
+            experiment_grad.construct_sensitivity_constraints()
+
+            # Add expressions to link the computed Jacobian elements using symbolic differentiation
+            # with the model. Need to be careful about the indexing of the variables
+
+            print("\nMeasurement Mapping:")
+            for k, v in experiment_grad.measurement_mapping.items():
+                print("key :", k, "value:", v)
+
+            print("\nParameter Mapping:")
+            for k, v in experiment_grad.parameter_mapping.items():
+                print("key :", k, "value:", v)
+
+            @model.Constraint(model.output_names, model.parameter_names)
+            def jacobian_rule(m, n, p):
+                """
+                m: Pyomo model
+                n: experimental output
+                p: unknown parameter
+                """
+
+                # Look up positions in the jacobian matrix
+                output_cuid = pyo.ComponentUID(n)
+                output_var = output_cuid.find_component_on(model.scenario_blocks[0])
+
+                parameter_cuid = pyo.ComponentUID(p)
+                parameter_var = parameter_cuid.find_component_on(model.scenario_blocks[0])
+
+                i = experiment_grad.measurement_mapping[output_var]
+                j = experiment_grad.parameter_mapping[parameter_var]
+
+                if i is None:
+                    # Measurement (experiment output) was fixed and thus it is not in
+                    # the jacobian matrix, so we set the value to 0
+                    var = 0
+                else:
+                    # Grab the variable. This was created by construct_sensitivity_constraints()
+                    var = m.scenario_blocks[0].jac_variables_wrt_param[i, j]
+
+                if self.scale_nominal_param_value:
+                    scale = parameter_var
+                else:
+                    scale = 1
+
+                # TODO: Make this an expression instead of a constraint?
+                # Get the sensitivity jacobian value
+                return m.sensitivity_jacobian[n, p] == var * self.scale_constant_value * scale
+
+        else:
+
+            # jacobian rule
+            def jacobian_rule(m, n, p):
+                """
+                m: Pyomo model
+                n: experimental output
+                p: unknown parameter
+                """
+                fd_step_mult = 1
+                cuid = pyo.ComponentUID(n)
+                param_ind = m.parameter_names.data().index(p)
+
+                # Different FD schemes lead to different scenarios for the computation
+                if self._gradient_method == GradientMethod.central:
+                    s1 = param_ind * 2
+                    s2 = param_ind * 2 + 1
+                    fd_step_mult = 2
+                elif self._gradient_method == GradientMethod.forward:
+                    s1 = param_ind + 1
+                    s2 = 0
+                elif self._gradient_method == GradientMethod.backward:
+                    s1 = 0
+                    s2 = param_ind + 1
+
+                var_up = cuid.find_component_on(m.scenario_blocks[s1])
+                var_lo = cuid.find_component_on(m.scenario_blocks[s2])
+
+                param = m.parameter_scenarios[max(s1, s2)]
+                param_loc = pyo.ComponentUID(param).find_component_on(m.scenario_blocks[0])
+                param_val = m.scenario_blocks[0].unknown_parameters[param_loc]
+                param_diff = param_val * fd_step_mult * self.step
+
+                if self.scale_nominal_param_value:
+                    return (
+                        m.sensitivity_jacobian[n, p]
+                        == (var_up - var_lo)
+                        / param_diff
+                        * param_val
+                        * self.scale_constant_value
+                    )
+                else:
+                    return (
+                        m.sensitivity_jacobian[n, p]
+                        == (var_up - var_lo) / param_diff * self.scale_constant_value
+                    )
+
+            model.jacobian_constraint = pyo.Constraint(
+                model.output_names, model.parameter_names, rule=jacobian_rule
+            )
 
         # A constraint to calculate elements in Hessian matrix
         # transfer prior FIM to be Expressions
@@ -964,9 +1128,7 @@ def fim_rule(m, p, q):
                 + m.prior_FIM[p, q]
             )
 
-        model.jacobian_constraint = pyo.Constraint(
-            model.output_names, model.parameter_names, rule=jacobian_rule
-        )
+
         model.fim_constraint = pyo.Constraint(
             model.parameter_names, model.parameter_names, rule=fim_rule
         )
@@ -1023,6 +1185,10 @@ def _generate_scenario_blocks(self, model=None):
 
         self.logger.info("Experiment output and measurement error lengths match.")
 
+        # TODO: If we keep the Jacobian the same (e.g., same indexing), this
+        # code probably does not change. If we use different indexing for finite
+        # difference versus symbolic gradients, this code will need to change.
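For reference, the finite-difference branch of `jacobian_rule` above encodes a relative central difference: each sensitivity is the difference of an output between the up- and down-perturbed scenarios, divided by 2*h*theta (with the optional theta rescaling when `scale_nominal_param_value` is set). A small standalone sketch of that formula, using hypothetical values rather than the Pyomo.DoE API:

```python
# Relative central-difference sensitivity of one output y with respect to
# one parameter theta, as in jacobian_rule above:
#   dy/dtheta ~= (y(theta*(1+h)) - y(theta*(1-h))) / (2*h*theta)
def central_diff_sensitivity(y_of_theta, theta, h=1e-3):
    y_up = y_of_theta(theta * (1 + h))  # "up" scenario, even index
    y_lo = y_of_theta(theta * (1 - h))  # "down" scenario, odd index
    return (y_up - y_lo) / (2 * h * theta)

# Example: y = theta**2, so dy/dtheta = 2*theta = 6 at theta = 3
print(central_diff_sensitivity(lambda t: t**2, 3.0))  # ~6.0
```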
+
         # Check that the user input FIM and Jacobian are the correct dimension
         if self.prior_FIM is not None:
             self.check_model_FIM(FIM=self.prior_FIM)
@@ -1040,8 +1206,24 @@ def _generate_scenario_blocks(self, model=None):
         # Make a new Suffix to hold which scenarios are associated with parameters
         model.parameter_scenarios = pyo.Suffix(direction=pyo.Suffix.LOCAL)
 
+        # TODO: Update this for symbolic gradients. Main idea:
+        #  - Keep scenario_blocks[0] as the base model (just rename?)
+        #  - Do not create other scenarios
+        #  - Exit
+
         # Populate parameter scenarios, and scenario inds based on finite difference scheme
-        if self.fd_formula == FiniteDifferenceStep.central:
+        if self._gradient_method == GradientMethod.symbolic:
+            # TODO: Or do we need to update this with 0?
+
+            # Is it okay to just keep this empty?
+            # Create a base scenario
+            # model.parameter_scenarios.update(0, None)
+
+            # Only one scenario for symbolic gradients
+            model.scenarios = range(1)
+
+            pass
+        elif self._gradient_method == GradientMethod.central:
             model.parameter_scenarios.update(
                 (2 * ind, k)
                 for ind, k in enumerate(model.base_model.unknown_parameters.keys())
@@ -1051,9 +1233,9 @@ def _generate_scenario_blocks(self, model=None):
                 for ind, k in enumerate(model.base_model.unknown_parameters.keys())
             )
             model.scenarios = range(len(model.base_model.unknown_parameters) * 2)
-        elif self.fd_formula in [
-            FiniteDifferenceStep.forward,
-            FiniteDifferenceStep.backward,
+        elif self._gradient_method in [
+            GradientMethod.forward,
+            GradientMethod.backward,
         ]:
             model.parameter_scenarios.update(
                 (ind + 1, k)
                 for ind, k in enumerate(model.base_model.unknown_parameters.keys())
@@ -1062,7 +1244,7 @@ def _generate_scenario_blocks(self, model=None):
             model.scenarios = range(len(model.base_model.unknown_parameters) + 1)
         else:
             raise AttributeError(
-                "Finite difference option not recognized. Please contact the developers as you should not see this error."
+                "Gradient method option not recognized. Please contact the developers as you should not see this error."
             )
 
     # TODO: Allow Params for `unknown_parameters` and `experiment_inputs`
@@ -1091,10 +1273,15 @@ def build_block_scenarios(b, s):
             m = b.model()
             b.transfer_attributes_from(m.base_model.clone())
 
+            if self._gradient_method == GradientMethod.symbolic:
+                # If symbolic gradients, we do not need to perturb parameters
+                # Just use the base model as the scenario block
+                return
+
             # Forward/Backward difference have a stationary case (s == 0), no parameter to perturb
-            if self.fd_formula in [
-                FiniteDifferenceStep.forward,
-                FiniteDifferenceStep.backward,
+            if self._gradient_method in [
+                GradientMethod.forward,
+                GradientMethod.backward,
             ]:
                 if s == 0:
                     return
@@ -1102,13 +1289,13 @@ def build_block_scenarios(b, s):
             param = m.parameter_scenarios[s]
 
             # Perturbation to be (1 + diff) * param_value
-            if self.fd_formula == FiniteDifferenceStep.central:
+            if self._gradient_method == GradientMethod.central:
                 diff = self.step * (
                     (-1) ** s
                 )  # Positive perturbation, even; negative, odd
-            elif self.fd_formula == FiniteDifferenceStep.backward:
+            elif self._gradient_method == GradientMethod.backward:
                 diff = self.step * -1  # Backward always negative perturbation
-            elif self.fd_formula == FiniteDifferenceStep.forward:
+            elif self._gradient_method == GradientMethod.forward:
                 diff = self.step  # Forward always positive
             else:
                 # To-Do: add an error message for this as not being implemented yet
@@ -1121,33 +1308,41 @@ def build_block_scenarios(b, s):
             ).set_value(m.base_model.unknown_parameters[param] * (1 + diff))
             res = self.solver.solve(b, tee=self.tee)
 
+        # TODO: This will change for symbolic gradients. Rename the base_model as
+        # scenario_blocks[0] and do not create other scenarios.
         model.scenario_blocks = pyo.Block(model.scenarios, rule=build_block_scenarios)
 
         # To-Do: this might have to change if experiment inputs have
         # a different value in the Suffix (currently it is the CUID)
         design_vars = [k for k, v in model.scenario_blocks[0].experiment_inputs.items()]
 
-        # Add constraints to equate block design with global design:
-        for ind, d in enumerate(design_vars):
-            con_name = "global_design_eq_con_" + str(ind)
-
-            # Constraint rule for global design constraints
-            def global_design_fixing(m, s):
-                if s == 0:
-                    return pyo.Constraint.Skip
-                block_design_var = pyo.ComponentUID(
-                    d, context=m.scenario_blocks[0]
-                ).find_component_on(m.scenario_blocks[s])
-                return d == block_design_var
+        # TODO: Do not need this for symbolic gradients
+        if self._gradient_method == GradientMethod.symbolic:
+            pass
+        else:
+            # Add constraints to equate block design with global design:
+            for ind, d in enumerate(design_vars):
+                con_name = "global_design_eq_con_" + str(ind)
+
+                # Constraint rule for global design constraints
+                def global_design_fixing(m, s):
+                    if s == 0:
+                        return pyo.Constraint.Skip
+                    block_design_var = pyo.ComponentUID(
+                        d, context=m.scenario_blocks[0]
+                    ).find_component_on(m.scenario_blocks[s])
+                    return d == block_design_var
+
+                model.add_component(
+                    con_name, pyo.Constraint(model.scenarios, rule=global_design_fixing)
+                )
 
-            model.add_component(
-                con_name, pyo.Constraint(model.scenarios, rule=global_design_fixing)
-            )
+        # TODO: For symbolic gradients, there is probably a performance gain by not cloning the base model or deleting it.
 
         # Clean up the base model used to generate the scenarios
         model.del_component(model.base_model)
 
-        # ToDo: consider this logic? Multi-block systems need something more fancy
+        # TODO: consider this logic? Multi-block systems need something more fancy
 
         self._built_scenarios = True
 
     # Create objective function
@@ -1468,7 +1663,8 @@ def compute_FIM_full_factorial(
         model: model to perform the full factorial exploration on
         design_ranges: dict of lists, of the form {<var_name>: [start, stop, numsteps]}
         method: string to specify which method should be used
-            options are ``kaug`` and ``sequential``
+            options are ``kaug`` and ``sequential``. If the gradient method is
+            "symbolic", this option is ignored.
 
         """
         # Start timer
diff --git a/pyomo/contrib/doe/examples/polynomial.py b/pyomo/contrib/doe/examples/polynomial.py
new file mode 100644
index 00000000000..d38aae9ea9a
--- /dev/null
+++ b/pyomo/contrib/doe/examples/polynomial.py
@@ -0,0 +1,152 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright (c) 2008-2025
+# National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+# Note: This is a temporary copy of the existing example to verify the new
+# symbolic gradient method works as expected.
+# Once this works, we will remove this file and update the test suite.
+
+from pyomo.common.dependencies import numpy as np, pathlib
+
+from pyomo.contrib.doe import DesignOfExperiments
+
+import pyomo.environ as pyo
+
+from pyomo.contrib.parmest.experiment import Experiment
+
+
+class PolynomialExperiment(Experiment):
+    def __init__(self, data):
+        """
+        Arguments
+        ---------
+        data: object containing vital experimental information
+        """
+        self.data = data
+        self.model = None
+
+    #############################
+    # End constructor definition
+
+    def get_labeled_model(self):
+        if self.model is None:
+            self.create_model()
+            self.finalize_model()
+            self.label_experiment()
+        return self.model
+
+    # Create flexible model without data
+    def create_model(self):
+        """
+        Define the polynomial model for the experiment.
+
+        y = a*x1 + b*x2 + c*x1*x2 + d
+
+        Return
+        ------
+        m: a Pyomo model
+        """
+
+        m = self.model = pyo.ConcreteModel()
+
+        # Define model variables
+
+        # Input variables (independent variables)
+        m.x1 = pyo.Var(bounds=(-5, 5), initialize=1)
+        m.x2 = pyo.Var(bounds=(-5, 5), initialize=1)
+
+        # Model coefficients (unknown parameters)
+        m.a = pyo.Var(bounds=(-5, 5), initialize=2)
+        m.b = pyo.Var(bounds=(-5, 5), initialize=-1)
+        m.c = pyo.Var(bounds=(-5, 5), initialize=0.5)
+        m.d = pyo.Var(bounds=(-5, 5), initialize=-1)
+
+        # Model output (dependent variable)
+        m.y = pyo.Var(initialize=0)
+
+        # Define model equation
+
+        @m.Constraint()
+        def output_equation(m):
+            return m.y == m.a * m.x1 + m.b * m.x2 + m.c * m.x1 * m.x2 + m.d
+
+    def finalize_model(self):
+        """
+        This model is so simple that we do not need to finalize it.
+
+        In a more complex example, we could discretize an ODE or DAE model,
+        etc. here
+
+        """
+        pass
+
+    def label_experiment(self):
+        """
+        Example for annotating (labeling) the model with a
+        full experiment.
+ """ + m = self.model + + # Set measurement labels + m.experiment_outputs = pyo.Suffix(direction=pyo.Suffix.LOCAL) + m.experiment_outputs[m.y] = None + + # Adding error for measurement values (assuming no covariance and constant error for all measurements) + m.measurement_error = pyo.Suffix(direction=pyo.Suffix.LOCAL) + m.measurement_error[m.y] = 1 + + # Identify design variables (experiment inputs) for the model + m.experiment_inputs = pyo.Suffix(direction=pyo.Suffix.LOCAL) + m.experiment_inputs[m.x1] = None + m.experiment_inputs[m.x2] = None + + # Add unknown parameter labels (using nominal values from the model) + m.unknown_parameters = pyo.Suffix(direction=pyo.Suffix.LOCAL) + m.unknown_parameters.update( + (k, pyo.value(k)) for k in [m.a, m.b, m.c, m.d] + ) + +def run_polynomial_doe(): + + # Create an experiment object + experiment = PolynomialExperiment(data=None) + + # Use the determinant objective with scaled sensitivity matrix + objective_option = "determinant" + scale_nominal_param_value = False + + # Create the DesignOfExperiments object + # We will not be passing any prior information in this example + # and allow the experiment object and the DesignOfExperiments + # call of ``run_doe`` perform model initialization. + doe_obj = DesignOfExperiments( + experiment, + gradient_method="symbolic", # Use finite difference method for gradient calculation + fd_formula=None, + step=1e-3, + objective_option=objective_option, + scale_constant_value=1, + scale_nominal_param_value=scale_nominal_param_value, + prior_FIM=None, + jac_initial=None, + fim_initial=None, + L_diagonal_lower_bound=1e-7, + solver=pyo.SolverFactory('ipopt'), # If none, use default in Pyomo.DoE (ipopt with ma57) + tee=False, + get_labeled_model_args=None, + _Cholesky_option=True, + _only_compute_fim_lower=True, + ) + + doe_obj.compute_FIM(method="kaug") + +if __name__ == "__main__": + run_polynomial_doe() \ No newline at end of file diff --git a/pyomo/contrib/doe/examples/reactor_example.py b/pyomo/contrib/doe/examples/reactor_example.py index 0fe81c723dd..4eb0bdff174 100644 --- a/pyomo/contrib/doe/examples/reactor_example.py +++ b/pyomo/contrib/doe/examples/reactor_example.py @@ -17,7 +17,6 @@ import json - # Example for sensitivity analysis on the reactor experiment # After sensitivity analysis is done, we perform optimal DoE def run_reactor_doe(): @@ -38,7 +37,7 @@ def run_reactor_doe(): experiment = ReactorExperiment(data=data_ex, nfe=10, ncp=3) # Use a central difference, with step size 1e-3 - fd_formula = "central" + gradient_method = "central" step_size = 1e-3 # Use the determinant objective with scaled sensitivity matrix @@ -51,7 +50,8 @@ def run_reactor_doe(): # call of ``run_doe`` perform model initialization. 
     doe_obj = DesignOfExperiments(
         experiment,
-        fd_formula=fd_formula,
+        gradient_method=gradient_method,
+        fd_formula=None,  # This argument has been deprecated in favor of gradient_method
         step=step_size,
         objective_option=objective_option,
         scale_constant_value=1,
@@ -60,13 +60,15 @@ def run_reactor_doe():
         jac_initial=None,
         fim_initial=None,
         L_diagonal_lower_bound=1e-7,
-        solver=None,
-        tee=False,
+        solver=pyo.SolverFactory('ipopt'),  # If None, use default in Pyomo.DoE (ipopt with ma57)
+        tee=True,
         get_labeled_model_args=None,
         _Cholesky_option=True,
         _only_compute_fim_lower=True,
     )
 
+    '''
+
     # Make design ranges to compute the full factorial design
     design_ranges = {"CA[0]": [1, 5, 9], "T[0]": [300, 700, 9]}
 
@@ -92,6 +94,7 @@ def run_reactor_doe():
         figure_file_name="example_reactor_compute_FIM",
         log_scale=False,
     )
+    '''
 
     ###########################
     # End sensitivity analysis
@@ -119,7 +122,18 @@ def run_reactor_doe():
         )
     )
 
-    print(doe_obj.results["Experiment Design Names"])
+    # This code demonstrates how to access the DoE results,
+    # store them in a Pandas DataFrame, and print them out in a readable format.
+    import pandas as pd
+    print("\n***Optimal Experiment Design***")
+
+    design_names = doe_obj.results["Experiment Design Names"]
+    design_values = doe_obj.results["Experiment Design"]
+    df = pd.DataFrame({
+        "Decision Var": design_names,
+        "Value": [round(val, 2) for val in design_values]
+    })
+    print(df)
 
     ###################
     # End optimal DoE
diff --git a/pyomo/contrib/doe/examples/reactor_example_symbolic_doe.py b/pyomo/contrib/doe/examples/reactor_example_symbolic_doe.py
new file mode 100644
index 00000000000..fcd1c94d227
--- /dev/null
+++ b/pyomo/contrib/doe/examples/reactor_example_symbolic_doe.py
@@ -0,0 +1,150 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright (c) 2008-2025
+# National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+# Note: This is a temporary copy of the existing example to verify the new
+# symbolic gradient method works as expected.
+# Once this works, we will remove this file and update the test suite.
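As an aside on verification: the polynomial model introduced earlier in this patch has closed-form sensitivities (dy/da = x1, dy/db = x2, dy/dc = x1*x2, dy/dd = 1), which gives a hand-checkable reference for the symbolic path. A minimal sketch, assuming a hypothetical nominal design of x1 = x2 = 1:

```python
# Closed-form sensitivities of y = a*x1 + b*x2 + c*x1*x2 + d with respect to
# the unknown parameters (a, b, c, d), evaluated at a hypothetical design point.
x1, x2 = 1.0, 1.0
expected_jac_row = [x1, x2, x1 * x2, 1.0]  # [dy/da, dy/db, dy/dc, dy/dd]
print(expected_jac_row)  # [1.0, 1.0, 1.0, 1.0]; compare against the computed Jacobian row
```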
+
+from pyomo.common.dependencies import numpy as np, pathlib
+
+from pyomo.contrib.doe.examples.reactor_experiment import ReactorExperiment
+from pyomo.contrib.doe import DesignOfExperiments
+
+import pyomo.environ as pyo
+
+import json
+
+import logging
+
+
+# Example for sensitivity analysis on the reactor experiment
+# After sensitivity analysis is done, we perform optimal DoE
+def run_reactor_doe():
+    # Read in file
+    DATA_DIR = pathlib.Path(__file__).parent
+    file_path = DATA_DIR / "result.json"
+
+    with open(file_path) as f:
+        data_ex = json.load(f)
+
+    # Put temperature control time points into correct format for reactor experiment
+    data_ex["control_points"] = {
+        float(k): v for k, v in data_ex["control_points"].items()
+    }
+
+    # Create a ReactorExperiment object; data and discretization information are part
+    # of the constructor of this object
+    experiment = ReactorExperiment(data=data_ex, nfe=10, ncp=3)
+
+
+    # Use the determinant objective with scaled sensitivity matrix
+    objective_option = "determinant"
+    scale_nominal_param_value = True
+
+    # Create the DesignOfExperiments object
+    # We will not be passing any prior information in this example
+    # and allow the experiment object and the DesignOfExperiments
+    # call of ``run_doe`` to perform model initialization.
+    doe_obj = DesignOfExperiments(
+        experiment,
+        gradient_method="symbolic",  # Use symbolic differentiation for gradient calculation
+        fd_formula=None,
+        step=1e-3,
+        objective_option=objective_option,
+        scale_constant_value=1,
+        scale_nominal_param_value=scale_nominal_param_value,
+        prior_FIM=None,
+        jac_initial=None,
+        fim_initial=None,
+        L_diagonal_lower_bound=1e-7,
+        solver=pyo.SolverFactory('ipopt'),  # If None, use default in Pyomo.DoE (ipopt with ma57)
+        tee=True,
+        logger_level=logging.INFO,
+        get_labeled_model_args=None,
+        _Cholesky_option=True,
+        _only_compute_fim_lower=True,
+    )
+
+    doe_obj.compute_FIM(method="sequential")
+
+
+    # Make design ranges to compute the full factorial design
+    design_ranges = {"CA[0]": [1, 5, 9], "T[0]": [300, 700, 9]}
+
+    # Compute the full factorial design with the sequential FIM calculation
+    doe_obj.compute_FIM_full_factorial(design_ranges=design_ranges, method="kaug")
+
+    # Plot the results
+    doe_obj.draw_factorial_figure(
+        sensitivity_design_variables=["CA[0]", "T[0]"],
+        fixed_design_variables={
+            "T[0.125]": 300,
+            "T[0.25]": 300,
+            "T[0.375]": 300,
+            "T[0.5]": 300,
+            "T[0.625]": 300,
+            "T[0.75]": 300,
+            "T[0.875]": 300,
+            "T[1]": 300,
+        },
+        title_text="Reactor Example",
+        xlabel_text="Concentration of A (M)",
+        ylabel_text="Initial Temperature (K)",
+        figure_file_name="example_reactor_compute_FIM",
+        log_scale=False,
+    )
+
+    ###########################
+    # End sensitivity analysis
+
+
+    # Begin optimal DoE
+    ####################
+    doe_obj.run_doe()
+
+    # Print out a results summary
+    print("Optimal experiment values: ")
+    print(
+        "\tInitial concentration: {:.2f}".format(
+            doe_obj.results["Experiment Design"][0]
+        )
+    )
+    print(
+        ("\tTemperature values: [" + "{:.2f}, " * 8 + "{:.2f}]").format(
+            *doe_obj.results["Experiment Design"][1:]
+        )
+    )
+    print("FIM at optimal design:\n {}".format(np.array(doe_obj.results["FIM"])))
+    print(
+        "Objective value at optimal design: {:.2f}".format(
+            pyo.value(doe_obj.model.objective)
+        )
+    )
+
+    # This code demonstrates how to access the DoE results,
+    # store them in a Pandas DataFrame, and print them out in a readable format.
+    import pandas as pd
+    print("\n***Optimal Experiment Design***")
+
+    design_names = doe_obj.results["Experiment Design Names"]
+    design_values = doe_obj.results["Experiment Design"]
+    df = pd.DataFrame({
+        "Decision Var": design_names,
+        "Value": [round(val, 2) for val in design_values]
+    })
+    print(df)
+
+    ###################
+    # End optimal DoE
+
+
+if __name__ == "__main__":
+    run_reactor_doe()
diff --git a/pyomo/contrib/doe/tests/test_doe_errors.py b/pyomo/contrib/doe/tests/test_doe_errors.py
index 4c9823a251d..6fd66d2370f 100644
--- a/pyomo/contrib/doe/tests/test_doe_errors.py
+++ b/pyomo/contrib/doe/tests/test_doe_errors.py
@@ -657,11 +657,13 @@ def test_bad_FD_generate_scens(self):
 
         doe_obj = DesignOfExperiments(**DoE_args)
 
+        # We are modifying doe_obj._gradient_method to an invalid value to test error handling.
+        # The user should never modify doe_obj._gradient_method on their own
         with self.assertRaisesRegex(
             AttributeError,
-            "Finite difference option not recognized. Please contact the developers as you should not see this error.",
+            "Gradient method option not recognized. Please contact the developers as you should not see this error.",
         ):
-            doe_obj.fd_formula = "bad things"
+            doe_obj._gradient_method = "bad things"
             doe_obj._generate_scenario_blocks()
 
     @unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available")
@@ -680,9 +682,9 @@ def test_bad_FD_seq_compute_FIM(self):
 
         with self.assertRaisesRegex(
             AttributeError,
-            "Finite difference option not recognized. Please contact the developers as you should not see this error.",
+            "Gradient method option not recognized. Please contact the developers as you should not see this error.",
         ):
-            doe_obj.fd_formula = "bad things"
+            doe_obj._gradient_method = "bad things"
             doe_obj.compute_FIM(method="sequential")
 
     def test_bad_objective(self):
diff --git a/pyomo/contrib/doe/utils.py b/pyomo/contrib/doe/utils.py
index 24be6fd696a..7459cc72dff 100644
--- a/pyomo/contrib/doe/utils.py
+++ b/pyomo/contrib/doe/utils.py
@@ -32,6 +32,12 @@
 from pyomo.core.base.param import ParamData
 from pyomo.core.base.var import VarData
 
+from pyomo.core.expr.calculus.diff_with_pyomo import reverse_sd, reverse_ad
+from pyomo.core.expr.visitor import identify_variables
+from pyomo.common.collections import ComponentSet
+
+import numpy as np
+import pandas as pd
 
 # Rescale FIM (a scaling function to help rescale FIM from parameter values)
 def rescale_FIM(FIM, param_vals):
@@ -100,3 +106,480 @@ def rescale_FIM(FIM, param_vals):
 #             pass
 #     # ToDo: Write error for suffix keys that aren't ParamData or VarData
 #
 #     return param_list
+
+class ExperimentGradients:
+    def __init__(self, experiment_model, symbolic=True, automatic=True, verbose=False):
+        """
+        Initialize the ExperimentGradients class.
+
+        Parameters
+        ----------
+        experiment_model : Pyomo model
+            The experiment model to analyze.
+        symbolic : bool, optional
+            If True, perform symbolic differentiation. Default is True.
+        automatic : bool, optional
+            If True, perform automatic differentiation. Default is True.
+
+        Performance tip:
+        - If you are only interested in one type of differentiation (symbolic or automatic),
+          you can set the other to False to save computation time.
+        - If you will use the instance of this class to perform both symbolic and automatic differentiation,
+          you can set both symbolic and automatic to True here.
+        - This implementation assumes the experiment model will not be modified after this class is initialized.
+
+        """
+
+        self.model = experiment_model
+
+        self.verbose = verbose
+
+        self._analyze_experiment_model()
+
+        self.jac_dict_sd = None
+        self.jac_dict_ad = None
+        self.jac_measurements_wrt_param = None
+
+        if symbolic or automatic:
+            # Analyze the experiment model to get the constraints and variables
+            self._perform_differentiation(symbolic, automatic)
+
+    def _analyze_experiment_model(self):
+        """
+        Partition the experiment model constraints and variables into
+        sets for equality constraints, outputs (measurements), inputs,
+        unknown parameters.
+
+        This builds the lists of indices used for performing
+        symbolic differentiation and automatic differentiation.
+        """
+
+        model = self.model
+
+        # Loop over the design variables and fix them
+        for v in model.experiment_inputs.keys():
+            v.fix()
+
+        # Loop over the unknown parameters and fix them
+        for v in model.unknown_parameters.keys():
+            v.fix()
+
+        # Parameters
+        # Create an empty component set
+        param_set = ComponentSet()
+
+        # Loop over the unknown model parameters
+        for p in model.unknown_parameters.keys():
+            param_set.add(p)
+
+        # Assemble into a list
+        param_list = list(param_set)
+
+        # Measurements (outputs)
+        # Create an empty component set
+        output_set = ComponentSet()
+
+        # Loop over the model outputs
+        for o in model.experiment_outputs.keys():
+            output_set.add(o)
+
+        # Assemble into a list
+        output_list = list(output_set)
+
+        # Constraints and Variables
+        # Create empty component sets
+        con_set = ComponentSet()  # These will be all constraints in the Pyomo model
+        var_set = ComponentSet()  # These will be all Pyomo variables in the Pyomo model
+
+        # Loop over the active model constraints
+        for c in model.component_data_objects(pyo.Constraint, descend_into=True, active=True):
+
+            # Add constraint c to the constraint set
+            con_set.add(c)
+
+            # Loop over the variables in the constraint c
+            # Note: changed this to include_fixed=True
+            # Changed back to False to fix problem degree of freedom issues
+            for v in identify_variables(c.body, include_fixed=False):
+                # Add variable v to the variable set
+                var_set.add(v)
+
+        # recall that the parameters are fixed, so we did not
+        # get them above. Let's add them now.
+        for p in model.unknown_parameters.keys():
+            if p not in var_set:
+                # If the parameter is not in the variable set, add it
+                var_set.add(p)
+
+        # TODO: This may not be needed, likely remove
+        # Keep track of outputs that are fixed
+        outputs_fixed = ComponentSet()
+
+        '''
+        # Make sure all outputs are in the variable set
+        for o in model.experiment_outputs.keys():
+            if o not in var_set:
+                # if the output is not in the variable set,
+                # it was fixed. Let's keep track of it
+                outputs_fixed.add(o)
+
+                # And let's add it to the variable set
+                var_set.add(o)
+
+                outputs_fixed.add(o)
+        '''
+
+        # TODO: Keep track of measurements that are fixed -- work in progress
+
+        # TODO: Keep track of mappings between measurement output and index
+        # This is needed to assemble the Jacobian correctly
+        # 1. Suffix. key = experiment outputs (parameters), value is position in list of variables
+        # 2. Suffix. key = parameters, value is measurement error
+        # 3. Use these two suffixes
+
+        # Keep track of the mapping from measurements to their index in the variable set
+        measurement_mapping = pyo.Suffix(direction=pyo.Suffix.LOCAL)
+
+        # Keep track of the mapping from parameters to their index in the variable set
+        parameter_mapping = pyo.Suffix(direction=pyo.Suffix.LOCAL)
+
+        # Assemble into lists
+        con_list = list(con_set)
+        var_list = list(var_set)
+
+        # Create empty lists
+        param_index = []
+        model_var_index = []
+        measurement_index = []
+
+        # TODO: This is no longer needed, likely remove
+        # Adding a `included` suffix to only
+        # take outputs that are unfixed. This
+        # makes indices match.
+        measurement_error_included = pyo.Suffix(direction=pyo.Suffix.LOCAL)
+
+        # Loop over the variables and determine which ones
+        # (and associated indices) are (a) parameters or
+        # (b) measurements
+        # TODO: Does this consider fixed variables?
+        # How does that change things? We fix all of our
+        # experiment inputs and unknown parameters.
+        for i, v in enumerate(var_set):
+            # Check if the variable is a parameter
+            if v in param_set:
+                # If yes, record its index
+                param_index.append(i)
+
+                # Problem with this approach is that v is `scenario[0].<var>` instead of just `<var>`.
+                parameter_mapping[v] = i
+
+            else:
+                # Otherwise, it is a model variable
+                model_var_index.append(i)
+
+                # Check if the model variable is a measurement
+                if v in output_set:
+                    # If yes, record its index
+                    measurement_index.append(i)
+                    measurement_error_included[v] = model.measurement_error[v]
+
+                    measurement_mapping[v] = i
+
+        # Take care of measurements that were fixed
+        for o in model.experiment_outputs.keys():
+            if o not in var_set:
+                # if the output is not in the variable set,
+                # it was fixed.
+
+                # Store the index as None
+                measurement_mapping[o] = None
+
+        # TODO: Check lengths here. The experiment model should be square if
+        # the experiment inputs and unknown parameters are fixed.
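The squareness requirement enforced below (variables minus parameters minus fixed measurements must equal constraints) is what makes the constraint Jacobian invertible for the implicit differentiation. A minimal sketch of the same counting logic on a toy model, using only Pyomo calls that appear in this patch; the model itself is hypothetical:

```python
import pyomo.environ as pyo
from pyomo.core.expr.visitor import identify_variables
from pyomo.common.collections import ComponentSet

# Toy model: one equality constraint, one output y, one fixed parameter a
m = pyo.ConcreteModel()
m.a = pyo.Var(initialize=2.0)
m.y = pyo.Var(initialize=0.0)
m.a.fix()  # unknown parameters are fixed before the analysis
m.con = pyo.Constraint(expr=m.y == m.a**2)

# Collect unfixed variables appearing in constraints, then add the parameter back
var_set = ComponentSet()
for c in m.component_data_objects(pyo.Constraint, active=True):
    for v in identify_variables(c.body, include_fixed=False):
        var_set.add(v)
var_set.add(m.a)

# Square check mirroring _analyze_experiment_model: n_vars - n_params == n_cons
print(len(var_set) - 1 == 1)  # True: 2 variables - 1 parameter == 1 constraint
```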
+
+        num_measurements = len(output_set)
+        num_params = len(param_set)
+        num_constraints = len(con_set)
+        num_vars = len(var_set)
+        num_inputs = len(model.experiment_inputs)
+
+        # TODO: This is likely not needed, likely remove
+        num_fixed_measurements = len(outputs_fixed)
+
+        if self.verbose:
+            print("Experiment model size:")
+
+            print(f"  {num_vars} total variables")
+            print(f"  {num_measurements} outputs (measurements)")
+            print(f"  {num_inputs} inputs")
+            print(f"  {num_params} unknown parameters")
+            print(f"  {num_constraints} constraints\n")
+
+        if num_vars - num_params - num_fixed_measurements != num_constraints:
+            raise ValueError(
+                "The model is not square: the number of variables minus unknown parameters does not equal the number of constraints.\n"
+                "This is required for the (automatic) differentiation to work correctly."
+            )
+
+        # Save terms that are needed for later
+        self.con_list = con_list
+        self.var_list = var_list
+        self.param_list = param_list
+
+        self.param_index = param_index
+        self.model_var_index = model_var_index
+        self.measurement_index = measurement_index
+
+        self.measurement_error_included = measurement_error_included
+
+        self.num_measurements = num_measurements
+        self.num_params = num_params
+        self.num_constraints = num_constraints
+        self.num_vars = num_vars
+
+        self.var_set = var_set
+
+        self.measurement_mapping = measurement_mapping
+        self.parameter_mapping = parameter_mapping
+
+    def _perform_differentiation(self, symbolic=True, automatic=True):
+
+        # Initialize dictionaries to hold the Jacobian entries
+        if symbolic:
+            jac_dict_sd = {}
+        if automatic:
+            jac_dict_ad = {}
+
+        if not symbolic and not automatic:
+            raise ValueError("At least one differentiation method must be selected: symbolic or automatic.")
+
+        # Grab data needed for the differentiation
+        con_list = self.con_list
+        var_list = self.var_list
+
+        # Enumerate over the constraints
+        for i, c in enumerate(con_list):
+            # Check we only have equality constraints... otherwise this gets more complicated
+            assert c.equality, "This function only works with equality constraints"
+
+            # Perform symbolic differentiation
+            if symbolic:
+                der_map_sd = reverse_sd(c.body)
+
+            if automatic:
+                der_map_ad = reverse_ad(c.body)
+
+            # Loop over the Pyomo variables, which includes
+            # parameters, measurements, control decisions
+            for j, v in enumerate(var_list):
+
+                # Symbolic differentiation
+                if symbolic:
+                    # Check if the variable is in the derivative map
+                    if v in der_map_sd:
+                        # Record the expression
+                        deriv = der_map_sd[v]
+                    else:
+                        # Otherwise, record 0
+                        deriv = 0
+                    # Save results in the Jacobian dictionary
+                    jac_dict_sd[(i, j)] = deriv
+
+                # Automatic differentiation
+                if automatic:
+                    if v in der_map_ad:
+                        # Record the expression
+                        deriv = der_map_ad[v]
+                    else:
+                        # Otherwise, record 0
+                        deriv = 0
+                    # Save results in the Jacobian dictionary
+                    jac_dict_ad[(i, j)] = deriv
+
+        if symbolic:
+            self.jac_dict_sd = jac_dict_sd
+        if automatic:
+            self.jac_dict_ad = jac_dict_ad
+
+    def compute_gradient_outputs_wrt_unknown_parameters(self):
+        """
+        Perform automatic differentiation to compute the gradients of the outputs
+        with respect to the unknown parameters.
+
+        """
+
+        if self.jac_dict_ad is None:
+            # Perform automatic differentiation if not already done
+            self._perform_differentiation(symbolic=False, automatic=True)
+
+        # Grab the necessary data from the instance
+        # (this keeps variable names shorter below)
+        num_constraints = self.num_constraints
+        num_params = self.num_params
+        num_measurements = self.num_measurements
+        param_index = self.param_index
+        model_var_index = self.model_var_index
+        jac_dict_ad = self.jac_dict_ad
+        measurement_index = self.measurement_index
+
+        jac_con_wrt_param = np.zeros((num_constraints, num_params))
+        for i in range(num_constraints):
+            for j, p in enumerate(param_index):
+                jac_con_wrt_param[i, j] = jac_dict_ad[(i, p)]
+
+        jac_con_wrt_vars = np.zeros((num_constraints, len(model_var_index)))
+        for i in range(num_constraints):
+            for j, v in enumerate(model_var_index):
+                jac_con_wrt_vars[i, j] = jac_dict_ad[(i, v)]
+
+        if self.verbose:
+            print(f"Jacobian of constraints with respect to parameters shape: {jac_con_wrt_param.shape}")
+            print(f"Jacobian of constraints with respect to variables shape: {jac_con_wrt_vars.shape}")
+
+        jac_vars_wrt_param = np.linalg.solve(
+            jac_con_wrt_vars, -jac_con_wrt_param
+        )
+
+        jac_measurements_wrt_param = np.zeros((num_measurements, num_params))
+
+        # print(f"Jacobian of all variables with respect to parameters:\n{jac_vars_wrt_param}")
+
+        # TODO: What about measurements that are fixed? They should be insensitive to changes in the model parameters.
+
+        # TODO: Need to convert the order of measurement here (corresponds to var_set) with the order in experiment_outputs
+        # If the experiment_output is NOT in var_set, then its row should be all zeros
+        # Pseudocode:
+        for ind, m in enumerate(self.model.experiment_outputs.keys()):
+
+            '''
+            if m not in self.var_set:
+                if self.verbose:
+                    # If the measurement is not in the variable set, print a message
+                    # and skip it
+                    print('Measurement {} is not in the variable list.'.format(m))
+                # Set the row in jac_measurements_wrt_param to all zeros
+                jac_measurements_wrt_param[ind, :] = 0.0
+            else:
+                # Find the index of the measurement in the variable list
+
+                i = None
+
+                # Pyomo team: Is there a better way to do this?
+                for j, v in enumerate(self.var_set):
+                    if v is m:
+                        # Found the measurement in the variable set
+                        # Get the index of the measurement in the variable list
+                        # This is needed to get the row in jac_vars_wrt_param
+                        # that corresponds to this measurement
+                        i = j
+                        break
+
+                if self.verbose:
+                    print(f"Measurement {m} found at index {i} in variable set.")
+
+                jac_measurements_wrt_param[ind, :] = jac_vars_wrt_param[i, :]
+            '''
+
+            i = self.measurement_mapping[m]
+
+            if i is None:
+                jac_measurements_wrt_param[ind, :] = 0.0
+            else:
+                # If the measurement is in the variable set, get the row
+                jac_measurements_wrt_param[ind, :] = jac_vars_wrt_param[i, :]
+
+        # The measurement_index is the index of the measurement in the var_set
+
+        # 1. Loop over experiment_outputs
+        # 2. Find index for element in var_set
+        # 3. Grab the row corresponding to the index from jac_vars_wrt_param
+        # 4. Store in numpy array for Jacobian
+
+        # jac_measurements_wrt_param = jac_vars_wrt_param[measurement_index, :]
+
+        # print(f"Jacobian of measurements with respect to parameters:\n{jac_measurements_wrt_param}")
+
+        self.jac_measurements_wrt_param = jac_measurements_wrt_param
+
+        return jac_measurements_wrt_param
+
+    def _package_jac_as_df(self, jac):
+        """
+        Convert a numpy array containing the Jacobian into a
+        pandas DataFrame
+
+        Arguments:
+            jac: numpy array where rows are measurements and columns are parameters
+
+        Returns:
+            pandas DataFrame
+
+        """
+
+        var_list = self.var_list
+        measurement_index = self.measurement_index
+        param_index = self.param_index
+
+        row_names = [str(o) for o in self.model.experiment_outputs.keys()]
+        col_names = [str(p) for p in self.model.unknown_parameters.keys()]
+
+        return pd.DataFrame(jac, index=row_names, columns=col_names)
+
+    def get_numeric_sensitivity_as_df(self):
+        # Use an explicit None check; truth-testing a numpy array raises an error
+        if self.jac_measurements_wrt_param is None:
+            self.compute_gradient_outputs_wrt_unknown_parameters()
+
+        return self._package_jac_as_df(self.jac_measurements_wrt_param)
+
+    def get_sensitivities_from_symbolic_as_df(self):
+
+        model = self.model
+
+        if not hasattr(model, 'jacobian_constraint'):
+            self.construct_sensitivity_constraints()
+
+        solver = pyo.SolverFactory('ipopt')
+        results1 = solver.solve(model, tee=self.verbose)
+
+        jac = np.zeros((len(model.measurement_index), len(model.param_index)))
+
+        for i, y in enumerate(model.measurement_index):
+            for j, p in enumerate(model.param_index):
+                jac[i, j] = model.jac_variables_wrt_param[y, p].value
+
+        return self._package_jac_as_df(jac)
+
+    def construct_sensitivity_constraints(self, model=None):
+
+        if self.jac_dict_sd is None:
+            # Perform symbolic differentiation if not already done
+            self._perform_differentiation(symbolic=True, automatic=False)
+
+        # Decision: Build these constraints in the model. Pyomo.DoE can look into scenario_blocks[0]
+
+        if model is None:
+            model = self.model
+
+        # Using the lists of indices to create Pyomo Sets
+        model.param_index = pyo.Set(initialize=self.param_index)
+        model.measurement_index = pyo.Set(initialize=self.measurement_index)
+        model.constraint_index = pyo.Set(initialize=range(len(self.con_list)))
+        model.var_index = pyo.Set(initialize=self.model_var_index)
+
+        # Define a Pyomo variable for the Jacobian of the model variables
+        # (everything except parameters) with respect to the model parameters
+        model.jac_variables_wrt_param = pyo.Var(model.var_index, model.param_index, initialize=0)
+
+        # Calculate the Jacobian using the chain rule and total derivative definitions
+        #
+        # Prior comment:
+        # This has an index mistake... jac_dict_sd includes the parameters, but var_index skips them
+        # We need to be more careful about the indices
+        #
+        # New reflection:
+        # var_index is built from the indices in var_list, which includes the parameters
+        # I think this is okay
+        @model.Constraint(model.constraint_index, model.param_index)
+        def jacobian_constraint(model, i, j):
+            return self.jac_dict_sd[(i, j)] == -sum(
+                model.jac_variables_wrt_param[k, j] * self.jac_dict_sd[(i, k)]
+                for k in model.var_index
+            )
\ No newline at end of file
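Both differentiation paths in `ExperimentGradients` are applications of the implicit function theorem: with equality constraints c(v, theta) = 0, the variable sensitivities solve (dc/dv)(dv/dtheta) = -(dc/dtheta), which is exactly the `np.linalg.solve(jac_con_wrt_vars, -jac_con_wrt_param)` call above (the symbolic path writes the same linear system as Pyomo constraints). A standalone numpy sketch on a hypothetical 2-variable, 1-parameter system:

```python
import numpy as np

# Toy system c(v, theta) = 0 with two variables and one parameter:
#   c1: v1 - theta**2     = 0
#   c2: v2 - 3*v1 - theta = 0
# evaluated at theta = 2 (so v1 = 4, v2 = 14)
theta = 2.0

# Jacobian of constraints w.r.t. variables (rows: c1, c2; cols: v1, v2)
dc_dv = np.array([[1.0, 0.0], [-3.0, 1.0]])

# Jacobian of constraints w.r.t. the parameter
dc_dtheta = np.array([[-2.0 * theta], [-1.0]])

# Implicit function theorem, mirroring compute_gradient_outputs_wrt_unknown_parameters
dv_dtheta = np.linalg.solve(dc_dv, -dc_dtheta)
print(dv_dtheta)  # [[4.], [13.]]: dv1/dtheta = 2*theta = 4, dv2/dtheta = 6*theta + 1 = 13
```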