diff --git a/include/openmc/constants.h b/include/openmc/constants.h
index 252194528c7..d570ac017a6 100644
--- a/include/openmc/constants.h
+++ b/include/openmc/constants.h
@@ -281,7 +281,7 @@ enum class MgxsType {
 // ============================================================================
 // TALLY-RELATED CONSTANTS

-enum class TallyResult { VALUE, SUM, SUM_SQ, SIZE };
+enum class TallyResult { VALUE, SUM, SUM_SQ, SUM_THIRD, SUM_FOURTH, SIZE };

 enum class TallyType { VOLUME, MESH_SURFACE, SURFACE, PULSE_HEIGHT };

diff --git a/include/openmc/hdf5_interface.h b/include/openmc/hdf5_interface.h
index 0092c08f8df..ca8f40fc046 100644
--- a/include/openmc/hdf5_interface.h
+++ b/include/openmc/hdf5_interface.h
@@ -100,8 +100,8 @@ void read_llong(hid_t obj_id, const char* name, long long* buffer, bool indep);
 void read_string(
   hid_t obj_id, const char* name, size_t slen, char* buffer, bool indep);
-void read_tally_results(
-  hid_t group_id, hsize_t n_filter, hsize_t n_score, double* results);
+void read_tally_results(hid_t group_id, hsize_t n_filter, hsize_t n_score,
+  double* results, bool vov_results);
 void write_attr_double(hid_t obj_id, int ndim, const hsize_t* dims,
   const char* name, const double* buffer);
 void write_attr_int(hid_t obj_id, int ndim, const hsize_t* dims,
@@ -114,9 +114,9 @@ void write_int(hid_t group_id, int ndim, const hsize_t* dims, const char* name,
 void write_llong(hid_t group_id, int ndim, const hsize_t* dims,
   const char* name, const long long* buffer, bool indep);
 void write_string(hid_t group_id, int ndim, const hsize_t* dims, size_t slen,
-  const char* name, char const* buffer, bool indep);
-void write_tally_results(
-  hid_t group_id, hsize_t n_filter, hsize_t n_score, const double* results);
+  const char* name, const char* buffer, bool indep);
+void write_tally_results(hid_t group_id, hsize_t n_filter, hsize_t n_score,
+  const double* results, bool vov_results);

 } // extern "C"

//==============================================================================
diff --git a/include/openmc/tallies/tally.h b/include/openmc/tallies/tally.h
index 48f678ced0f..bc724889af9 100644
--- a/include/openmc/tallies/tally.h
+++ b/include/openmc/tallies/tally.h
@@ -106,6 +106,8 @@ class Tally {
   bool writable() const { return writable_; }

+  bool vov_results() const { return vov_; }
+
   //----------------------------------------------------------------------------
   // Other methods.

@@ -119,6 +121,8 @@ class Tally {
   void accumulate();

+  void accumulate_vov();
+
   //! return the index of a score specified by name
   int score_index(const std::string& score) const;

@@ -192,6 +196,8 @@ class Tally {
   //! Whether to multiply by atom density for reaction rates
   bool multiply_density_ {true};

+  //! Whether to accumulate third and fourth power sums for VOV
+  bool vov_ {false};
+
   int64_t index_;
 };

diff --git a/include/openmc/urr.h b/include/openmc/urr.h
index 3978c7b86a2..1e603715847 100644
--- a/include/openmc/urr.h
+++ b/include/openmc/urr.h
@@ -27,10 +27,10 @@ class UrrData {
     double heating;
   };

-  Interpolation interp_;  //!< interpolation type
-  int inelastic_flag_;    //!< inelastic competition flag
-  int absorption_flag_;   //!< other absorption flag
-  bool multiply_smooth_;  //!< multiply by smooth cross section?
+  Interpolation interp_; //!< interpolation type
+  int inelastic_flag_;   //!< inelastic competition flag
+  int absorption_flag_;  //!< other absorption flag
+  bool multiply_smooth_; //!< multiply by smooth cross section?
   vector<double> energy_; //!< incident energies

   auto n_energy() const { return energy_.size(); }

diff --git a/include/openmc/volume_calc.h b/include/openmc/volume_calc.h
index fa8d3d65ece..3e5be8b3a5e 100644
--- a/include/openmc/volume_calc.h
+++ b/include/openmc/volume_calc.h
@@ -34,7 +34,7 @@ class VolumeCalculation {
     vector<double> atoms;       //!< Number of atoms for each nuclide
     vector<double> uncertainty; //!< Uncertainty on number of atoms
     int iterations; //!< Number of iterations needed to obtain the results
-  }; // Results for a single domain
+  };              // Results for a single domain

   // Constructors
   VolumeCalculation(pugi::xml_node node);
diff --git a/openmc/__init__.py b/openmc/__init__.py
index bb972b4e6ad..fa55b910b42 100644
--- a/openmc/__init__.py
+++ b/openmc/__init__.py
@@ -24,6 +24,7 @@
 from openmc.trigger import *
 from openmc.tally_derivative import *
 from openmc.tallies import *
+from openmc.tally_stats import *
 from openmc.mgxs_library import *
 from openmc.executor import *
 from openmc.statepoint import *
diff --git a/openmc/settings.py b/openmc/settings.py
index bd3a89e11cc..e88c4345280 100644
--- a/openmc/settings.py
+++ b/openmc/settings.py
@@ -2078,7 +2078,7 @@ def _max_tracks_from_xml_element(self, root):
         text = get_text(root, 'max_tracks')
         if text is not None:
             self.max_tracks = int(text)
-    
+
     def _random_ray_from_xml_element(self, root):
         elem = root.find('random_ray')
         if elem is not None:
diff --git a/openmc/statepoint.py b/openmc/statepoint.py
index 715becf4882..dfc8dc9879d 100644
--- a/openmc/statepoint.py
+++ b/openmc/statepoint.py
@@ -429,6 +429,10 @@ def tallies(self):
                 if "multiply_density" in group.attrs:
                     tally.multiply_density = group.attrs["multiply_density"].item() > 0

+                # Check whether VOV results were accumulated for this tally
+                if "vov_results" in group.attrs:
+                    tally._vov = group.attrs["vov_results"].item() > 0
+
                 # Read the number of realizations
                 n_realizations = group['n_realizations'][()]

diff --git a/openmc/tallies.py b/openmc/tallies.py
index 06de0205c9e..0bfb8129982 100644
--- a/openmc/tallies.py
+++ b/openmc/tallies.py
@@ -13,12 +13,13 @@
 import pandas as pd
 import scipy.sparse as sps

 import openmc
 import openmc.checkvalue as cv
 from ._xml import clean_indentation, reorder_attributes, get_text
 from .mixin import IDManagerMixin
 from .mesh import MeshBase
+from . import tally_stats

 # The tally arithmetic product types. The tensor product performs the full
 # cross product of the data in two tallies with respect to a specified axis
@@ -57,7 +58,7 @@ class Tally(IDManagerMixin):
         Name of the tally
     multiply_density : bool
         Whether reaction rates should be multiplied by atom density
-    
+
        .. versionadded:: 0.14.0
     filters : list of openmc.Filter
         List of specified filters for the tally
@@ -91,10 +92,20 @@ class Tally(IDManagerMixin):
     sum_sq : numpy.ndarray
         An array containing the sum of each independent realization squared
         for each bin
+    sum_rd : numpy.ndarray
+        An array containing the sum of each independent realization to the
+        third power for each bin
+    sum_th : numpy.ndarray
+        An array containing the sum of each independent realization to the
+        fourth power for each bin
     mean : numpy.ndarray
         An array containing the sample mean for each bin
     std_dev : numpy.ndarray
         An array containing the sample standard deviation for each bin
+    vov : bool
+        Whether to accumulate third and fourth power sums for each tally bin
+    normality_tests : bool
+        Whether normality tests will be performed on the tally results
     figure_of_merit : numpy.ndarray
         An array containing the figure of merit for each bin

@@ -129,8 +140,13 @@ def __init__(self, tally_id=None, name=''):

         self._sum = None
         self._sum_sq = None
+        self._sum_rd = None
+        self._sum_th = None
         self._mean = None
         self._std_dev = None
+        self._vov = None
+        self._normality_tests = None
+        self._sig_level = 0.05
         self._simulation_time = None
         self._with_batch_statistics = False
         self._derived = False
@@ -215,12 +231,34 @@ def name(self, name):
     @property
     def multiply_density(self):
         return self._multiply_density
-    
+
     @multiply_density.setter
     def multiply_density(self, value):
         cv.check_type('multiply density', value, bool)
         self._multiply_density = value

+    @property
+    def vov(self):
+        return self._vov
+
+    @vov.setter
+    def vov(self, value):
+        cv.check_type('vov', value, bool)
+        self._vov = value
+
+    @property
+    def normality_tests(self):
+        return self._normality_tests
+
+    @normality_tests.setter
+    def normality_tests(self, value):
+        cv.check_type('normality tests', value, bool)
+        self._normality_tests = value
+
+    @property
+    def sig_level(self):
+        return self._sig_level
+
     @property
     def filters(self):
         return self._filters
@@ -371,20 +409,29 @@ def _read_results(self):
             # Update nuclides
             nuclide_names = group['nuclides'][()]
             self._nuclides = [name.decode().strip() for name in nuclide_names]

             # Extract Tally data from the file
             data = group['results']
             sum_ = data[:, :, 0]
             sum_sq = data[:, :, 1]
-            
+
             # Reshape the results arrays
             sum_ = np.reshape(sum_, self.shape)
             sum_sq = np.reshape(sum_sq, self.shape)
-            
+
             # Set the data for this Tally
             self._sum = sum_
             self._sum_sq = sum_sq

+            # Third and fourth power sums are only present when VOV results
+            # were accumulated
+            if data.shape[2] == 4:
+                self._vov = True
+                self._sum_rd = np.reshape(data[:, :, 2], self.shape)
+                self._sum_th = np.reshape(data[:, :, 3], self.shape)
+            else:
+                self._vov = False
+
             # Convert NumPy arrays to SciPy sparse LIL matrices
             if self.sparse:
                 self._sum = sps.lil_matrix(self._sum.flatten(),
                                            self._sum.shape)
@@ -427,6 +479,38 @@ def sum_sq(self):
     def sum_sq(self, sum_sq):
         cv.check_type('sum_sq', sum_sq, Iterable)
         self._sum_sq = sum_sq
+
+    @property
+    @ensure_results
+    def sum_rd(self):
+        if not self._sp_filename or self.derived:
+            return None
+
+        if self.sparse:
+            return np.reshape(self._sum_rd.toarray(), self.shape)
+        else:
+            return self._sum_rd
+
+    @sum_rd.setter
+    def sum_rd(self, sum_rd):
+        cv.check_type('sum_rd', sum_rd, Iterable)
+        self._sum_rd = sum_rd
+
+    @property
+    @ensure_results
+    def sum_th(self):
+        if not self._sp_filename or self.derived:
+            return None
+
+        if self.sparse:
+            return np.reshape(self._sum_th.toarray(), self.shape)
+        else:
+            return self._sum_th
+
+    @sum_th.setter
+    def sum_th(self, sum_th):
+        cv.check_type('sum_th', sum_th, Iterable)
+        self._sum_th = sum_th

     @property
     def mean(self):
@@ -469,6 +553,40 @@ def std_dev(self):
             return np.reshape(self._std_dev.toarray(), self.shape)
         else:
             return self._std_dev
+
+    @property
+    def VOV(self):
+        if not self._sp_filename or self.sum_rd is None:
+            return None
+
+        n = self.num_realizations
+        nonzero = np.abs(self.mean) > 0
+        vov = np.zeros_like(self.mean)
+
+        # Fourth central moment of the sample over the squared second central
+        # moment, both evaluated from the accumulated power sums
+        numerator = (self.sum_th - (4.0 * self.sum_rd * self.sum) / n
+                     + (6.0 * self.sum_sq * self.sum**2) / n**2
+                     - (3.0 * self.sum**4) / n**3)
+        denominator = (self.sum_sq - (1.0 / n) * self.sum**2)**2
+
+        # Only evaluate bins with nonzero means; the masked numerator and
+        # denominator already share the shape of vov[nonzero]
+        vov[nonzero] = numerator[nonzero] / denominator[nonzero] - 1.0 / n
+
+        return vov
+
+    @property
+    def normality_test(self):
+        if not self._sp_filename:
+            return None
+
+        n = self.num_realizations
+        if n < 8:
+            raise ValueError("The skewness test requires at least 8 realizations.")
+        elif n < 20:
+            raise ValueError("The kurtosis test requires at least 20 realizations.")
+        else:
+            tally_stats.print_normality_tests_summary(
+                n, self.mean, self.sum_sq, self.sum_rd, self.sum_th,
+                self._sig_level)

     @property
     def figure_of_merit(self):
@@ -528,6 +648,12 @@ def sparse(self, sparse):
                 if self._sum_sq is not None:
                     self._sum_sq = sps.lil_matrix(self._sum_sq.flatten(),
                                                   self._sum_sq.shape)
+                if self._sum_rd is not None:
+                    self._sum_rd = sps.lil_matrix(self._sum_rd.flatten(),
+                                                  self._sum_rd.shape)
+                if self._sum_th is not None:
+                    self._sum_th = sps.lil_matrix(self._sum_th.flatten(),
+                                                  self._sum_th.shape)
                 if self._mean is not None:
                     self._mean = sps.lil_matrix(self._mean.flatten(),
                                                 self._mean.shape)
@@ -543,6 +669,10 @@ def sparse(self, sparse):
                 self._sum = np.reshape(self._sum.toarray(), self.shape)
             if self._sum_sq is not None:
                 self._sum_sq = np.reshape(self._sum_sq.toarray(), self.shape)
+            if self._sum_rd is not None:
+                self._sum_rd = np.reshape(self._sum_rd.toarray(), self.shape)
+            if self._sum_th is not None:
+                self._sum_th = np.reshape(self._sum_th.toarray(), self.shape)
             if self._mean is not None:
                 self._mean = np.reshape(self._mean.toarray(), self.shape)
             if self._std_dev is not None:
@@ -868,6 +998,34 @@ def merge(self, other):
                                                axis=merge_axis)

             merged_tally._sum_sq = np.reshape(merged_sum_sq, merged_tally.shape)
+
+        # Concatenate sum_rd arrays if present in both tallies
+        if self.sum_rd is not None and other.sum_rd is not None:
+            self_sum_rd = self.get_reshaped_data(value='sum_rd')
+            other_sum_rd = other_copy.get_reshaped_data(value='sum_rd')
+
+            if join_right:
+                merged_sum_rd = np.concatenate((self_sum_rd, other_sum_rd),
+                                               axis=merge_axis)
+            else:
+                merged_sum_rd = np.concatenate((other_sum_rd, self_sum_rd),
+                                               axis=merge_axis)
+
+            merged_tally._sum_rd = np.reshape(merged_sum_rd, merged_tally.shape)
+
+        # Concatenate sum_th arrays if present in both tallies
+        if self.sum_th is not None and other.sum_th is not None:
+            self_sum_th = self.get_reshaped_data(value='sum_th')
+            other_sum_th = other_copy.get_reshaped_data(value='sum_th')
+
+            if join_right:
+                merged_sum_th = np.concatenate((self_sum_th, other_sum_th),
+                                               axis=merge_axis)
+            else:
+                merged_sum_th = np.concatenate((other_sum_th, self_sum_th),
+                                               axis=merge_axis)
+
+            merged_tally._sum_th = np.reshape(merged_sum_th, merged_tally.shape)

         # Concatenate mean arrays if present in both tallies
         if self.mean is not None and other.mean is not None:

@@ -958,6 +1116,11 @@ def to_xml_element(self):
             subelement = ET.SubElement(element, "derivative")
             subelement.text = str(self.derivative.id)

+        # Optional VOV accumulation
+        if self._vov:
+            subelement = ET.SubElement(element, "vov")
+            subelement.text = str(self._vov).lower()
+
         return element

     def add_results(self, statepoint: cv.PathLike | openmc.StatePoint):
@@ -984,6 +1147,8 @@ def add_results(self, statepoint: cv.PathLike | openmc.StatePoint):
         # point are based on the current statepoint file
         self._sum = None
         self._sum_sq = None
+        self._sum_rd = None
+        self._sum_th = None
         self._mean = None
         self._std_dev = None
         self._num_realizations = 0
@@ -1356,7 +1521,9 @@ def get_values(self, scores=[], filters=[], filter_bins=[],
            (value == 'std_dev' and self.std_dev is None) or \
            (value == 'rel_err' and self.mean is None) or \
            (value == 'sum' and self.sum is None) or \
-           (value == 'sum_sq' and self.sum_sq is None):
+           (value == 'sum_sq' and self.sum_sq is None) or \
+           (value == 'sum_rd' and self.sum_rd is None) or \
+           (value == 'sum_th' and self.sum_th is None):
             msg = f'The Tally ID="{self.id}" has no data to return'
             raise ValueError(msg)

@@ -1379,6 +1546,10 @@ def get_values(self, scores=[], filters=[], filter_bins=[],
             data = self.sum[indices]
         elif value == 'sum_sq':
             data = self.sum_sq[indices]
+        elif value == 'sum_rd':
+            data = self.sum_rd[indices]
+        elif value == 'sum_th':
+            data = self.sum_th[indices]
         else:
-            msg = f'Unable to return results from Tally ID="{value}" since ' \
-                  f'the requested value "{self.id}" is not \'mean\', ' \
+            msg = f'Unable to return results from Tally ID="{self.id}" since ' \
+                  f'the requested value "{value}" is not \'mean\', ' \
@@ -2712,6 +2883,15 @@ def get_slice(self, scores=[], filters=[], filter_bins=[], nuclides=[],
             new_sum_sq = self.get_values(scores, filters, filter_bins,
                                          nuclides, 'sum_sq')
             new_tally.sum_sq = new_sum_sq
+        if not self.derived and self.sum_rd is not None:
+            new_sum_rd = self.get_values(scores, filters, filter_bins,
+                                         nuclides, 'sum_rd')
+            new_tally.sum_rd = new_sum_rd
+        if not self.derived and self.sum_th is not None:
+            new_sum_th = self.get_values(scores, filters, filter_bins,
+                                         nuclides, 'sum_th')
+            new_tally.sum_th = new_sum_th
         if self.mean is not None:
             new_mean = self.get_values(scores, filters, filter_bins,
                                        nuclides, 'mean')
@@ -3152,6 +3332,12 @@ def diagonalize_filter(self, new_filter, filter_position=-1):
         if not self.derived and self.sum_sq is not None:
             new_tally._sum_sq = np.zeros(new_tally.shape, dtype=np.float64)
             new_tally._sum_sq[diag_indices, :, :] = self.sum_sq
+        if not self.derived and self.sum_rd is not None:
+            new_tally._sum_rd = np.zeros(new_tally.shape, dtype=np.float64)
+            new_tally._sum_rd[diag_indices, :, :] = self.sum_rd
+        if not self.derived and self.sum_th is not None:
+            new_tally._sum_th = np.zeros(new_tally.shape, dtype=np.float64)
+            new_tally._sum_th[diag_indices, :, :] = self.sum_th
         if self.mean is not None:
            new_tally._mean = np.zeros(new_tally.shape, dtype=np.float64)
            new_tally._mean[diag_indices, :, :] = self.mean
@@ -3352,7 +3537,7 @@ def export_to_xml(self, path='tallies.xml'):
         # Write the XML Tree to the tallies.xml file
         tree = ET.ElementTree(root_element)
         tree.write(str(p), xml_declaration=True, encoding='utf-8')
-    
+
     @classmethod
     def from_xml_element(cls, elem, meshes=None):
         """Generate tallies from an XML element
diff --git a/openmc/tally_stats.py b/openmc/tally_stats.py
new file mode 100644
index 00000000000..94564927e18
--- /dev/null
+++ b/openmc/tally_stats.py
@@ -0,0 +1,150 @@
+"""Normality tests for tally results based on accumulated power sums.
+
+References
+----------
+[1] R. B. D'Agostino, A. Belanger, and R. B. D'Agostino, Jr., "A Suggestion
+    for Using Powerful and Informative Tests of Normality," The American
+    Statistician, Vol. 44, No. 4 (Nov., 1990), pp. 316-321.
+"""
+
+import numpy as np
+from scipy.stats import norm, chi2
+
+
+def _calc_b1_b2(n, mean, sum_sq, sum_rd, sum_th):
+    """Compute sample skewness and kurtosis from the accumulated tally sums."""
+    m2 = sum_sq / n - mean**2
+    m3 = (sum_rd - 3.0 * mean * sum_sq) / n + 2.0 * mean**3
+    m4 = (sum_th - 4.0 * mean * sum_rd + 6.0 * mean**2 * sum_sq) / n \
+        - 3.0 * mean**4
+
+    # Sample skewness, sqrt(b1) = m3 / m2^(3/2)
+    sqrt_b1 = np.where(m2 > 0, m3 / m2**1.5, 0.0)
+
+    # Sample kurtosis, b2 = m4 / m2^2
+    b2 = np.where(m2 > 0, m4 / m2**2, 0.0)
+    return sqrt_b1, b2
+
+
+def skewness_test(n, mean, sum_sq, sum_rd, sum_th, alternative="two-sided"):
+    """Perform the D'Agostino test for skewness [1]."""
+    sqrt_b1, _ = _calc_b1_b2(n, mean, sum_sq, sum_rd, sum_th)
+
+    y = sqrt_b1 * np.sqrt(((n + 1.0) * (n + 3.0)) / (6.0 * (n - 2.0)))
+
+    beta2 = (3.0 * (n**2 + 27.0*n - 70.0) * (n + 1.0) * (n + 3.0)) / \
+        ((n - 2.0) * (n + 5.0) * (n + 7.0) * (n + 9.0))
+    W_squared = -1.0 + np.sqrt(2.0 * (beta2 - 1.0))
+    delta = 1.0 / np.sqrt(np.log(np.sqrt(W_squared)))
+    alpha = np.sqrt(2.0 / (W_squared - 1.0))
+
+    # Z = delta * ln(y/alpha + sqrt((y/alpha)^2 + 1)) = delta * asinh(y/alpha);
+    # asinh is odd, so this covers both signs of y
+    Zb1 = delta * np.arcsinh(y / alpha)
+
+    if alternative == "two-sided":
+        pval = 2.0 * norm.sf(np.abs(Zb1))
+    elif alternative == "greater":
+        pval = norm.sf(Zb1)
+    elif alternative == "less":
+        pval = norm.cdf(Zb1)
+    else:
+        raise ValueError(f'Unknown alternative "{alternative}"')
+
+    return Zb1, pval, sqrt_b1
+
+
+def kurtosis_test(n, mean, sum_sq, sum_rd, sum_th, alternative="two-sided"):
+    """Perform the D'Agostino test for kurtosis [1]."""
+    _, b2 = _calc_b1_b2(n, mean, sum_sq, sum_rd, sum_th)
+
+    mean_b2 = 3.0 * (n - 1.0) / (n + 1.0)
+    var_b2 = 24.0 * n * (n - 2.0) * (n - 3.0) / \
+        ((n + 1.0)**2 * (n + 3.0) * (n + 5.0))
+
+    x = (b2 - mean_b2) / np.sqrt(var_b2)
+
+    # Third standardized moment of b2 and the transformation parameter A
+    moment = ((6.0 * (n**2 - 5.0*n + 2.0)) / ((n + 7.0) * (n + 9.0))) * \
+        np.sqrt((6.0 * (n + 3.0) * (n + 5.0)) / (n * (n - 2.0) * (n - 3.0)))
+    A = 6.0 + (8.0 / moment) * \
+        ((2.0 / moment) + np.sqrt(1.0 + 4.0 / moment**2))
+    Zb2 = (1.0 - 2.0 / (9.0 * A) -
+           ((1.0 - 2.0 / A) /
+            (1.0 + x * np.sqrt(2.0 / (A - 4.0))))**(1.0 / 3.0)) / \
+        np.sqrt(2.0 / (9.0 * A))
+
+    if alternative == "two-sided":
+        pval = 2.0 * norm.sf(np.abs(Zb2))
+    elif alternative == "greater":
+        pval = norm.sf(Zb2)
+    elif alternative == "less":
+        pval = norm.cdf(Zb2)
+    else:
+        raise ValueError(f'Unknown alternative "{alternative}"')
+
+    return Zb2, pval, b2
+
+
+def k2_test(Zb1, Zb2):
+    """Omnibus test of normality that combines skewness and kurtosis [1]."""
+    K2 = Zb1**2 + Zb2**2
+    pval = chi2.sf(K2, 2)
+    return K2, pval
+
+
+def print_normality_tests_summary(n, mean, sum_sq, sum_rd, sum_th, sig_level,
+                                  skew_alternative="two-sided",
+                                  kurt_alternative="two-sided"):
+    """Print a summary of the skewness, kurtosis, and omnibus tests."""
+    # Ensure inputs are NumPy arrays for consistent handling
+    mean = np.asarray(mean)
+    sum_sq = np.asarray(sum_sq)
+    sum_rd = np.asarray(sum_rd)
+    sum_th = np.asarray(sum_th)
+
+    # Determine the number of filter bins (assumes the first dimension
+    # corresponds to filters)
+    num_filters = mean.shape[0] if mean.ndim > 1 else 1
+
+    for i in range(num_filters):
+        # Extract data for the current filter bin
+        current_mean = mean[i] if num_filters > 1 else mean
+        current_sum_sq = sum_sq[i] if num_filters > 1 else sum_sq
+        current_sum_rd = sum_rd[i] if num_filters > 1 else sum_rd
+        current_sum_th = sum_th[i] if num_filters > 1 else sum_th
+
+        # Perform skewness and kurtosis tests
+        Zb1, p_skew, sqrt_b1 = skewness_test(
+            n, current_mean, current_sum_sq, current_sum_rd, current_sum_th,
+            alternative=skew_alternative)
+        Zb2, p_kurt, b2 = kurtosis_test(
+            n, current_mean, current_sum_sq, current_sum_rd, current_sum_th,
+            alternative=kurt_alternative)
+        K2, p_omni = k2_test(Zb1, Zb2)
+
+        # Print skewness results
+        print("\nSkewness Test Result:")
+        print(f"Raw skewness: {sqrt_b1}")
+        print(f"Z statistic: {Zb1}, p-value: {p_skew}")
+        if np.all(p_skew < sig_level):
+            if np.all(sqrt_b1 > 0):
+                print("Data is right-skewed.")
+            elif np.all(sqrt_b1 < 0):
+                print("Data is left-skewed.")
+        else:
+            print("Data is symmetric.")
+
+        # Print kurtosis results; the null expectation of b2 is 3(n-1)/(n+1)
+        print("\nKurtosis Test Result:")
+        print(f"Kurtosis: {b2}")
+        print(f"Z statistic: {Zb2}, p-value: {p_kurt}")
+        if np.all(p_kurt < sig_level):
+            if np.all(b2 > 3.0 * (n - 1.0) / (n + 1.0)):
+                print("Distribution is leptokurtic.")
+            elif np.all(b2 < 3.0 * (n - 1.0) / (n + 1.0)):
+                print("Distribution is platykurtic.")
+        else:
+            print("Distribution has normal kurtosis.")
+
+        print("\nOmnibus Test Result:")
+        print(f"K2 statistic: {K2}, p-value: {p_omni}")
+        if np.all(p_omni < sig_level):
+            print("Data is not normally distributed.")
+        else:
+            print("Data is normally distributed.")
diff --git a/src/hdf5_interface.cpp b/src/hdf5_interface.cpp
index e90aa74901e..6a0d4d9945d 100644
--- a/src/hdf5_interface.cpp
+++ b/src/hdf5_interface.cpp
@@ -537,23 +537,42 @@ void read_complex(
   H5Tclose(complex_id);
 }

-void read_tally_results(
-  hid_t group_id, hsize_t n_filter, hsize_t n_score, double* results)
+void read_tally_results(hid_t group_id, hsize_t n_filter, hsize_t n_score,
+  double* results, bool vov_results)
 {
   // Create dataspace for hyperslab in memory
   constexpr int ndim = 3;
-  hsize_t dims[ndim] {n_filter, n_score, 3};
-  hsize_t start[ndim] {0, 0, 1};
-  hsize_t count[ndim] {n_filter, n_score, 2};
+  // Each bin stores VALUE plus two accumulated moments, or four with VOV
+  hsize_t n_results = vov_results ? 5 : 3;
+  hsize_t dims[ndim] {n_filter, n_score, n_results};
+  hsize_t start[ndim] {0, 0, 1};
+  hsize_t count[ndim] {n_filter, n_score, n_results - 1};
   hid_t memspace = H5Screate_simple(ndim, dims, nullptr);
   H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, nullptr, count, nullptr);

   // Read the dataset
   read_dataset_lowlevel(
     group_id, "results", H5T_NATIVE_DOUBLE, memspace, false, results);

   // Free resources
   H5Sclose(memspace);
 }

 void write_attr(hid_t obj_id, int ndim, const hsize_t* dims, const char* name,
@@ -687,25 +706,44 @@ void write_string(
     group_id, 0, nullptr, buffer.length(), name, buffer.c_str(), indep);
 }

-void write_tally_results(
-  hid_t group_id, hsize_t n_filter, hsize_t n_score, const double* results)
+void write_tally_results(hid_t group_id, hsize_t n_filter, hsize_t n_score,
+  const double* results, bool vov_results)
 {
   // Set dimensions of the sum/sum_sq (and sum_third/sum_fourth) hyperslab
   constexpr int ndim = 3;
-  hsize_t count[ndim] {n_filter, n_score, 2};
-
-  // Set dimensions of results array
-  hsize_t dims[ndim] {n_filter, n_score, 3};
+  hsize_t n_results = vov_results ? 5 : 3;
+  hsize_t count[ndim] {n_filter, n_score, n_results - 1};
+
+  // Set dimensions of results array
+  hsize_t dims[ndim] {n_filter, n_score, n_results};
   hsize_t start[ndim] {0, 0, 1};
   hid_t memspace = H5Screate_simple(ndim, dims, nullptr);
   H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, nullptr, count, nullptr);

   // Create and write dataset
   write_dataset_lowlevel(group_id, ndim, count, "results", H5T_NATIVE_DOUBLE,
     memspace, false, results);

   // Free resources
   H5Sclose(memspace);
 }

 bool using_mpio_device(hid_t obj_id)
diff --git a/src/mgxs.cpp b/src/mgxs.cpp
index a88d2c196b6..9645f8a52a4 100644
--- a/src/mgxs.cpp
+++ b/src/mgxs.cpp
@@ -373,7 +373,7 @@ Mgxs::Mgxs(const std::string& in_name, const vector<double>& mat_kTs,
       }
     }
   } // end switch
-  } // end microscopic temperature loop
+  }   // end microscopic temperature loop

   // Now combine the microscopic data at each relevant temperature
   // We will do this by treating the multiple temperatures of a nuclide as
diff --git a/src/output.cpp b/src/output.cpp
index baaf682a157..4458eb17cf2 100644
--- a/src/output.cpp
+++ b/src/output.cpp
@@ -497,6 +497,34 @@ void print_runtime()

//==============================================================================

+double variance_of_variance(const double* x, int n)
+{
+  double sum = x[static_cast<int>(TallyResult::SUM)];
+  double sum_sq = x[static_cast<int>(TallyResult::SUM_SQ)];
+  double sum_rd = x[static_cast<int>(TallyResult::SUM_THIRD)];
+  double sum_th = x[static_cast<int>(TallyResult::SUM_FOURTH)];
+
+  // Fourth central moment of the sample
+  double numerator = sum_th - (4.0 * sum_rd * sum) / n +
+                     (6.0 * sum_sq * pow(sum, 2.0)) / pow(n, 2.0) -
+                     (3.0 * pow(sum, 4.0)) / pow(n, 3.0);
+
+  // Squared second central moment of the sample
+  double denominator = (sum_sq - (1.0 / n) * pow(sum, 2.0)) *
+                       (sum_sq - (1.0 / n) * pow(sum, 2.0));
+
+  // Estimated relative variance of the variance
+  double vov = numerator / denominator - 1.0 / n;
+
+  return vov;
+}
+
 std::pair<double, double> mean_stdev(const double* x, int n)
 {
   double mean = x[static_cast<int>(TallyResult::SUM)] / n;
@@ -699,20 +728,28 @@ void write_tallies()
         }
       }

-      // Write the score, mean, and uncertainty.
+      // Write the score, mean, uncertainty, and VOV when available.
       indent += 2;
       for (auto score : tally.scores_) {
         std::string score_name =
           score > 0 ? reaction_name(score) : score_names.at(score);
         double mean, stdev;
         std::tie(mean, stdev) =
           mean_stdev(&tally.results_(filter_index, score_index, 0),
             tally.n_realizations_);
-        fmt::print(tallies_out, "{0:{1}}{2:<36} {3:.6} +/- {4:.6}\n", "",
-          indent + 1, score_name, mean, t_value * stdev);
+        if (tally.vov_results()) {
+          double vov = variance_of_variance(
+            &tally.results_(filter_index, score_index, 0),
+            tally.n_realizations_);
+          fmt::print(tallies_out,
+            "{0:{1}}{2:<36} {3:.6} +/- {4:.6} -- VOV: {5:.6}\n", "",
+            indent + 1, score_name, mean, t_value * stdev, vov);
+        } else {
+          fmt::print(tallies_out, "{0:{1}}{2:<36} {3:.6} +/- {4:.6}\n", "",
+            indent + 1, score_name, mean, t_value * stdev);
+        }
         score_index += 1;
       }
       indent -= 2;
     }
   }
 }
diff --git a/src/state_point.cpp b/src/state_point.cpp
index 3b822715cff..65947d88525 100644
--- a/src/state_point.cpp
+++ b/src/state_point.cpp
@@ -201,6 +201,12 @@ extern "C" int openmc_statepoint_write(const char* filename, bool* write_source)
       write_attribute(tally_group, "multiply_density", 0);
     }

+    if (tally->vov_results()) {
+      write_attribute(tally_group, "vov_results", 1);
+    } else {
+      write_attribute(tally_group, "vov_results", 0);
+    }
+
     if (tally->estimator_ == TallyEstimator::ANALOG) {
       write_dataset(tally_group, "estimator", "analog");
     } else if (tally->estimator_ == TallyEstimator::TRACKLENGTH) {
@@ -264,12 +270,13 @@ extern "C" int openmc_statepoint_write(const char* filename, bool* write_source)
     for (const auto& tally : model::tallies) {
       if (!tally->writable_)
         continue;

-      // Write sum and sum_sq for each bin
+      // Write sum, sum_sq, sum_rd, and sum_th for each bin
       std::string name = "tally " + std::to_string(tally->id_);
       hid_t tally_group = open_group(tallies_group, name.c_str());
       auto& results = tally->results_;
       write_tally_results(tally_group, results.shape()[0],
-        results.shape()[1], results.data());
+        results.shape()[1], results.data(), tally->vov_results());
       close_group(tally_group);
     }
   } else {
@@ -508,7 +515,7 @@ extern "C" int openmc_statepoint_load(const char* filename)
       } else {
         auto& results = tally->results_;
         read_tally_results(tally_group, results.shape()[0],
-          results.shape()[1], results.data());
+          results.shape()[1], results.data(), tally->vov_results());
         read_dataset(tally_group, "n_realizations", tally->n_realizations_);
         close_group(tally_group);
       }
@@ -1000,7 +1007,8 @@ void write_tally_results_nr(hid_t file_id)

     // Write reduced tally results to file
     auto shape = results_copy.shape();
-    write_tally_results(tally_group, shape[0], shape[1], results_copy.data());
+    write_tally_results(
+      tally_group, shape[0], shape[1], results_copy.data(), t->vov_results());
     close_group(tally_group);
   } else {
diff --git a/src/tallies/tally.cpp b/src/tallies/tally.cpp
index 54840b5188e..7634809a2c3 100644
--- a/src/tallies/tally.cpp
+++ b/src/tallies/tally.cpp
@@ -102,6 +102,9 @@ Tally::Tally(pugi::xml_node node)
     multiply_density_ = get_node_value_bool(node, "multiply_density");
   }

+  if (check_for_node(node, "vov")) {
+    vov_ = get_node_value_bool(node, "vov");
+  }
+
   // =======================================================================
   // READ DATA FOR FILTERS

@@ -796,7 +799,11 @@ void Tally::init_triggers(pugi::xml_node node)
 void Tally::init_results()
 {
   int n_scores = scores_.size() * nuclides_.size();
-  results_ = xt::empty<double>({n_filter_bins_, n_scores, 3});
+  if (vov_) {
+    results_ = xt::empty<double>({n_filter_bins_, n_scores, 5});
+  } else {
+    results_ = xt::empty<double>({n_filter_bins_, n_scores, 3});
+  }
 }

 void Tally::reset()
 {
@@ -831,7 +838,9 @@ void Tally::accumulate()

// Accumulate each result
#pragma omp parallel for
+  // Loop over filter bins (e.g., specific cells, energy bins)
   for (int i = 0; i < results_.shape()[0]; ++i) {
+    // Loop over score bins (e.g., flux, total or fission reaction rates)
     for (int j = 0; j < results_.shape()[1]; ++j) {
       double val = results_(i, j, TallyResult::VALUE) * norm;
       results_(i, j, TallyResult::VALUE) = 0.0;
@@ -842,6 +851,47 @@ void Tally::accumulate()
   }
 }

+void Tally::accumulate_vov()
+{
+  // Increment number of realizations
+  n_realizations_ += settings::reduce_tallies ? 1 : mpi::n_procs;
+
+  if (mpi::master || !settings::reduce_tallies) {
+    // Calculate total source strength for normalization
+    double total_source = 0.0;
+    if (settings::run_mode == RunMode::FIXED_SOURCE) {
+      for (const auto& s : model::external_sources) {
+        total_source += s->strength();
+      }
+    } else {
+      total_source = 1.0;
+    }
+
+    // Account for number of source particles in normalization
+    double norm =
+      total_source / (settings::n_particles * settings::gen_per_batch);
+
+    if (settings::solver_type == SolverType::RANDOM_RAY) {
+      norm = 1.0;
+    }
+
+// Accumulate each result, including third and fourth power sums
+#pragma omp parallel for
+    for (int i = 0; i < results_.shape()[0]; ++i) {
+      for (int j = 0; j < results_.shape()[1]; ++j) {
+        double val = results_(i, j, TallyResult::VALUE) * norm;
+        results_(i, j, TallyResult::VALUE) = 0.0;
+        results_(i, j, TallyResult::SUM) += val;
+        results_(i, j, TallyResult::SUM_SQ) += val * val;
+        results_(i, j, TallyResult::SUM_THIRD) += val * val * val;
+        results_(i, j, TallyResult::SUM_FOURTH) += val * val * val * val;
+      }
+    }
+  }
+}
+
 int Tally::score_index(const std::string& score) const
 {
   for (int i = 0; i < scores_.size(); i++) {
@@ -1064,7 +1133,11 @@ void accumulate_tallies()
   // Accumulate results for each tally
   for (int i_tally : model::active_tallies) {
     auto& tally {model::tallies[i_tally]};
-    tally->accumulate();
+    if (tally->vov_results()) {
+      tally->accumulate_vov();
+    } else {
+      tally->accumulate();
+    }
   }
 }
diff --git a/tests/unit_tests/test_tallies.py b/tests/unit_tests/test_tallies.py
index c38f067d58c..659a3bdd373 100644
--- a/tests/unit_tests/test_tallies.py
+++ b/tests/unit_tests/test_tallies.py
@@ -1,7 +1,9 @@
 import numpy as np
 import pytest
+import shutil
 import openmc
+import openmc.tally_stats as ts

 def test_xml_roundtrip(run_in_tmpdir):
@@ -112,6 +114,125 @@ def test_figure_of_merit(sphere_model, run_in_tmpdir):
     assert tally.figure_of_merit == pytest.approx(1 / (rel_err**2 * time))


+def test_tally_normality_functions():
+    values = np.arange(1, 21, dtype=float)
+    n = values.size
+    mean = np.array([values.mean()])
+    sum_sq = np.array([np.sum(values**2)])
+    sum_rd = np.array([np.sum(values**3)])
+    sum_th = np.array([np.sum(values**4)])
+
+    sqrt_b1, b2 = ts._calc_b1_b2(n, mean, sum_sq, sum_rd, sum_th)
+    assert sqrt_b1.shape == mean.shape
+    assert b2.shape == mean.shape
+
+    Zb1, p_skew, _ = ts.skewness_test(n, mean, sum_sq, sum_rd, sum_th)
+    assert Zb1.shape == mean.shape
+    assert p_skew.shape == mean.shape
+    assert np.all((0.0 <= p_skew) & (p_skew <= 1.0))
+
+    Zb2, p_kurt, _ = ts.kurtosis_test(n, mean, sum_sq, sum_rd, sum_th)
+    assert Zb2.shape == mean.shape
+    assert p_kurt.shape == mean.shape
+    assert np.all((0.0 <= p_kurt) & (p_kurt <= 1.0))
+
+    K2, p_omni = ts.k2_test(Zb1, Zb2)
+    assert K2.shape == mean.shape
+    assert p_omni.shape == mean.shape
+    assert np.all((0.0 <= p_omni) & (p_omni <= 1.0))
+
+
+def test_tally_normality_stats(sphere_model, run_in_tmpdir):
+    assert shutil.which("openmc") is not None, "Cannot find 'openmc' on PATH"
+
+    tally = openmc.Tally()
+    tally.scores = ['flux']
+    tally.vov = True
+    tally.normality_tests = True
+    sphere_model.tallies = [tally]
+
+    sphere_model.settings.particles = 500
+    sphere_model.settings.batches = 30
+    sphere_model.settings.run_mode = 'fixed source'
+
+    sphere_model.run(apply_tally_results=True)
+
+    n = tally.num_realizations
+    mu = tally.mean
+    s2 = tally.sum_sq
+    s3 = tally.sum_rd
+    s4 = tally.sum_th
+
+    assert n >= 20
+    assert mu is not None
+
+    Zg1, p_skew, _ = ts.skewness_test(n, mu, s2, s3, s4)
+    Zg2, p_kurt, _ = ts.kurtosis_test(n, mu, s2, s3, s4)
+    K2, p_omni = ts.k2_test(Zg1, Zg2)
+
+    for arr in (Zg1, Zg2, K2, p_skew, p_kurt, p_omni):
+        assert arr.shape == mu.shape
+
+    for p in (p_skew, p_kurt, p_omni):
+        assert ((0.0 <= p) & (p <= 1.0)).all()
+
+
+def test_vov(sphere_model, run_in_tmpdir):
+    # Create a tally with all the gizmos
+    tally = openmc.Tally(name='test tally')
+    ef = openmc.EnergyFilter([0.0, 0.1, 1.0, 10.0e6])
+    mesh = openmc.RegularMesh.from_domain(sphere_model.geometry, (2, 2, 2))
+    mf = openmc.MeshFilter(mesh)
+    tally.filters = [ef, mf]
+    tally.scores = ['flux', 'absorption', 'fission', 'scatter']
+    tally.vov = True
+    sphere_model.tallies = [tally]
+
+    sp_file = sphere_model.run(apply_tally_results=True)
+
+    assert tally._mean is None
+    assert tally._std_dev is None
+    assert tally._sum is None
+    assert tally._sum_sq is None
+    assert tally._sum_rd is None
+    assert tally._sum_th is None
+    assert tally._num_realizations == 0
+    assert tally._sp_filename == sp_file
+
+    with openmc.StatePoint(sp_file) as sp:
+        assert tally in sp.tallies.values()
+        sp_tally = sp.tallies[tally.id]
+
+        assert np.all(sp_tally.std_dev == tally.std_dev)
+        assert np.all(sp_tally.mean == tally.mean)
+        assert np.all(sp_tally.VOV == tally.VOV)
+        assert sp_tally.nuclides == tally.nuclides
+
+        n = sp_tally.num_realizations
+        mean = sp_tally.mean
+        sum_ = sp_tally._sum
+        sum_sq = sp_tally._sum_sq
+        sum_rd = sp_tally._sum_rd
+        sum_th = sp_tally._sum_th
+
+        expected_vov = np.zeros_like(mean)
+        nonzero = np.abs(mean) > 0
+
+        num = (
+            sum_th
+            - (4.0 * sum_rd * sum_) / n
+            + (6.0 * sum_sq * sum_**2) / (n**2)
+            - (3.0 * sum_**4) / (n**3)
+        )
+        den = (sum_sq - (1.0 / n) * sum_**2)**2
+
+        expected_vov[nonzero] = num[nonzero] / den[nonzero] - 1.0 / n
+
+        assert np.allclose(expected_vov, sp_tally.VOV, rtol=1e-7, atol=0.0)
+
+
 def test_tally_application(sphere_model, run_in_tmpdir):
     # Create a tally with most possible gizmos
     tally = openmc.Tally(name='test tally')
@@ -120,6 +241,7 @@ def test_tally_application(sphere_model, run_in_tmpdir):
     mf = openmc.MeshFilter(mesh)
     tally.filters = [ef, mf]
     tally.scores = ['flux', 'absorption', 'fission', 'scatter']
+    tally.vov = True
     sphere_model.tallies = [tally]

     # FIRST RUN
@@ -131,6 +253,8 @@ def test_tally_application(sphere_model, run_in_tmpdir):
     assert tally._std_dev is None
     assert tally._sum is None
     assert tally._sum_sq is None
+    assert tally._sum_rd is None
+    assert tally._sum_th is None
     assert tally._num_realizations == 0
     # the statepoint file property should be set, however
     assert tally._sp_filename == sp_file
@@ -142,6 +266,7 @@ def test_tally_application(sphere_model, run_in_tmpdir):
         # at this point the tally information regarding results should be the same
         assert (sp_tally.std_dev == tally.std_dev).all()
         assert (sp_tally.mean == tally.mean).all()
+        assert (sp_tally.VOV == tally.VOV).all()
         assert sp_tally.nuclides == tally.nuclides

     # SECOND RUN
@@ -151,7 +276,8 @@ def test_tally_application(sphere_model, run_in_tmpdir):

     assert (sp_tally.std_dev != tally.std_dev).any()
     assert (sp_tally.mean != tally.mean).any()
-
-    # now re-read data from the new stateopint file and
+    assert (sp_tally.VOV != tally.VOV).any()
+
+    # now re-read data from the new statepoint file and
     # ensure that the new results match those in
     # the latest statepoint
@@ -162,4 +288,5 @@ def test_tally_application(sphere_model, run_in_tmpdir):
     # at this point the tally information regarding results should be the same
     assert (sp_tally.std_dev == tally.std_dev).all()
     assert (sp_tally.mean == tally.mean).all()
+    assert (sp_tally.VOV == tally.VOV).all()
     assert sp_tally.nuclides == tally.nuclides
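
Reviewer note: the same VOV estimator appears three times in this patch (`Tally.VOV`, `variance_of_variance()`, and `test_vov`), so it is worth sanity-checking the algebra once, standalone. The sketch below (pure NumPy, not part of the patch; all names are illustrative) verifies that the power-sum form accumulated by `accumulate_vov()` equals the central-moment form VOV = sum((x - xbar)^4) / (sum((x - xbar)^2))^2 - 1/n.

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.exponential(scale=2.0, size=1000)  # stand-in for per-batch tally values
n = x.size

# Raw power sums, as accumulated per bin in Tally::accumulate_vov()
s1, s2, s3, s4 = (np.sum(x**k) for k in (1, 2, 3, 4))

# Power-sum form, matching the numerator/denominator used in the patch
num = s4 - 4.0 * s3 * s1 / n + 6.0 * s2 * s1**2 / n**2 - 3.0 * s1**4 / n**3
den = (s2 - s1**2 / n) ** 2
vov_sums = num / den - 1.0 / n

# Central-moment form, computed directly from the samples
d = x - x.mean()
vov_direct = np.sum(d**4) / np.sum(d**2) ** 2 - 1.0 / n

assert np.isclose(vov_sums, vov_direct)
print(vov_sums)
```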
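Similarly, `tally_stats._calc_b1_b2` recovers central moments from the same power sums. A minimal cross-check, assuming only NumPy and SciPy, confirms that algebra against directly computed sample moments; scipy.stats provides `skewtest` and `kurtosistest`, which implement the same D'Agostino transformations used in `skewness_test` and `kurtosis_test` [1], so their Z statistics can serve as an external reference.

```python
import numpy as np
from scipy.stats import skewtest, kurtosistest

rng = np.random.default_rng(42)
x = rng.normal(loc=1.0, scale=0.2, size=500)
n = x.size

# Power sums, as a VOV-enabled tally bin would accumulate them
mean = x.mean()
sum_sq = np.sum(x**2)
sum_rd = np.sum(x**3)
sum_th = np.sum(x**4)

# Central moments recovered from power sums (same algebra as _calc_b1_b2)
m2 = sum_sq / n - mean**2
m3 = (sum_rd - 3.0 * mean * sum_sq) / n + 2.0 * mean**3
m4 = (sum_th - 4.0 * mean * sum_rd + 6.0 * mean**2 * sum_sq) / n - 3.0 * mean**4

# They must match the directly computed sample moments
d = x - mean
assert np.isclose(m2, np.mean(d**2))
assert np.isclose(m3, np.mean(d**3))
assert np.isclose(m4, np.mean(d**4))

# External reference for the Z statistics of the full tests
print("skewness Z:", skewtest(x).statistic)
print("kurtosis Z:", kurtosistest(x).statistic)
```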
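Finally, an end-to-end usage sketch. This relies on the API added by this patch (`tally.vov` and the `Tally.VOV` property) plus a configured cross-section library; the water-sphere model itself is an illustrative assumption, not something defined in the patch.

```python
import openmc

# Illustrative bare sphere of water (assumed model, not part of the patch)
water = openmc.Material()
water.add_nuclide('H1', 2.0)
water.add_nuclide('O16', 1.0)
water.set_density('g/cm3', 1.0)

sphere = openmc.Sphere(r=10.0, boundary_type='vacuum')
cell = openmc.Cell(fill=water, region=-sphere)

model = openmc.Model()
model.geometry = openmc.Geometry([cell])
model.settings.run_mode = 'fixed source'
model.settings.particles = 1000
model.settings.batches = 30
model.settings.source = openmc.IndependentSource()

# The new flag from this patch: accumulate third/fourth power sums per bin
tally = openmc.Tally(name='flux with VOV')
tally.scores = ['flux']
tally.vov = True
model.tallies = [tally]

sp_file = model.run()
with openmc.StatePoint(sp_file) as sp:
    t = sp.tallies[tally.id]
    # As a rule of thumb (borrowed from MCNP practice), a well-converged
    # tally bin should show VOV below about 0.1
    print(t.mean.ravel(), t.std_dev.ravel(), t.VOV.ravel())
```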