diff --git a/.travis.yml b/.travis.yml.bak similarity index 100% rename from .travis.yml rename to .travis.yml.bak diff --git a/PyMca5/PyMcaCore/DataObject.py b/PyMca5/PyMcaCore/DataObject.py index 20494681b..ceb7655b7 100644 --- a/PyMca5/PyMcaCore/DataObject.py +++ b/PyMca5/PyMcaCore/DataObject.py @@ -1,4 +1,4 @@ -#/*########################################################################## +# /*########################################################################## # # The PyMca X-Ray Fluorescence Toolkit # @@ -32,8 +32,9 @@ __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" import numpy + class DataObject(object): - ''' + """ Simple container of an array and associated information. Basically it has the members: info: A dictionary @@ -47,15 +48,16 @@ class DataObject(object): x: A list containing arrays to be considered axes y: A list of data to be considered as signals m: A list containing the monitor data - ''' + """ + GETINFO_DEPRECATION_WARNING = True GETDATA_DEPRECATION_WARNING = True SELECT_DEPRECATION_WARNING = True def __init__(self): - ''' + """ Default Constructor - ''' + """ self.info = {} self.data = numpy.array([]) @@ -68,7 +70,7 @@ def getInfo(self): """ if DataObject.GETINFO_DEPRECATION_WARNING: print("DEPRECATION WARNING: DataObject.getInfo()") - DataObject.GETINFO_DEPRECATION_WARNING = False + DataObject.GETINFO_DEPRECATION_WARNING = False return self.info def getData(self): @@ -77,7 +79,7 @@ def getData(self): """ if DataObject.GETDATA_DEPRECATION_WARNING: print("DEPRECATION WARNING: DataObject.getData()") - DataObject.GETDATA_DEPRECATION_WARNING = False + DataObject.GETDATA_DEPRECATION_WARNING = False return self.data def select(self, selection=None): @@ -89,78 +91,79 @@ def select(self, selection=None): DataObject.SELECT_DEPRECATION_WARNING = False dataObject = DataObject() dataObject.info = self.info - dataObject.info['selection'] = selection + dataObject.info["selection"] = selection if selection is None: dataObject.data = self.data return dataObject if type(selection) == dict: - #dataObject.data = self.data #should I set it to none??? + # dataObject.data = self.data #should I set it to none??? 
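+            # Expected selection layout (inferred from the branches below);
+            # "x"/"y"/"m" map to lists of row or column indices:
+            #     {"rows": {"x": [...], "y": [...], "m": [...]}}
+            #     {"cols": {...}} or {"columns": {...}} with the same structure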
dataObject.data = None - if 'rows' in selection: + if "rows" in selection: dataObject.x = None dataObject.y = None dataObject.m = None - if 'x' in selection['rows']: - for rownumber in selection['rows']['x']: + if "x" in selection["rows"]: + for rownumber in selection["rows"]["x"]: if rownumber is None: continue if dataObject.x is None: dataObject.x = [] dataObject.x.append(self.data[rownumber, :]) - if 'y' in selection['rows']: - for rownumber in selection['rows']['y']: + if "y" in selection["rows"]: + for rownumber in selection["rows"]["y"]: if rownumber is None: continue if dataObject.y is None: dataObject.y = [] dataObject.y.append(self.data[rownumber, :]) - if 'm' in selection['rows']: - for rownumber in selection['rows']['m']: + if "m" in selection["rows"]: + for rownumber in selection["rows"]["m"]: if rownumber is None: continue if dataObject.m is None: dataObject.m = [] dataObject.m.append(self.data[rownumber, :]) - elif ('cols' in selection) or ('columns' in selection): - if 'cols' in selection: - key = 'cols' + elif ("cols" in selection) or ("columns" in selection): + if "cols" in selection: + key = "cols" else: - key = 'columns' + key = "columns" dataObject.x = None dataObject.y = None dataObject.m = None - if 'x' in selection[key]: - for rownumber in selection[key]['x']: + if "x" in selection[key]: + for rownumber in selection[key]["x"]: if rownumber is None: continue if dataObject.x is None: dataObject.x = [] dataObject.x.append(self.data[:, rownumber]) - if 'y' in selection[key]: - for rownumber in selection[key]['y']: + if "y" in selection[key]: + for rownumber in selection[key]["y"]: if rownumber is None: continue if dataObject.y is None: dataObject.y = [] dataObject.y.append(self.data[:, rownumber]) - if 'm' in selection[key]: - for rownumber in selection[key]['m']: + if "m" in selection[key]: + for rownumber in selection[key]["m"]: if rownumber is None: continue if dataObject.m is None: dataObject.m = [] dataObject.m.append(self.data[:, rownumber]) if dataObject.x is None: - if 'Channel0' in dataObject.info: - ch0 = int(dataObject.info['Channel0']) + if "Channel0" in dataObject.info: + ch0 = int(dataObject.info["Channel0"]) else: ch0 = 0 - dataObject.x = [numpy.arange(ch0, - ch0 + len(dataObject.y[0])).astype(numpy.float64)] + dataObject.x = [ + numpy.arange(ch0, ch0 + len(dataObject.y[0])).astype(numpy.float64) + ] if not ("selectiontype" in dataObject.info): dataObject.info["selectiontype"] = "%dD" % len(dataObject.y) return dataObject diff --git a/PyMca5/PyMcaGui/physics/xrf/McaAdvancedFit.py b/PyMca5/PyMcaGui/physics/xrf/McaAdvancedFit.py index 0dc5a4002..3f16cb20b 100644 --- a/PyMca5/PyMcaGui/physics/xrf/McaAdvancedFit.py +++ b/PyMca5/PyMcaGui/physics/xrf/McaAdvancedFit.py @@ -58,10 +58,10 @@ #not a big problem pass -from PyMca5.PyMcaPhysics.xrf import ClassMcaTheory -FISX = ClassMcaTheory.FISX +from PyMca5.PyMcaPhysics.xrf import LegacyMcaTheory +FISX = LegacyMcaTheory.FISX if FISX: - FisxHelper = ClassMcaTheory.FisxHelper + FisxHelper = LegacyMcaTheory.FisxHelper from . import FitParam from . import McaAdvancedTable from . 
import QtMcaAdvancedFitReport @@ -291,7 +291,7 @@ def __init__(self, parent=None, name="PyMca - McaAdvancedFit",fl=0, self.matrixXRFMCSpectrumButton.setToolTip('Calculate Matrix Spectrum Using Monte Carlo') self.peaksSpectrumButton.setToolTip('Toggle Individual Peaks Spectrum Calculation On/Off') - self.mcafit = ClassMcaTheory.McaTheory() + self.mcafit = LegacyMcaTheory.McaTheory() self.fitButton.clicked.connect(self.fit) self.printButton.clicked.connect(self.printActiveTab) diff --git a/PyMca5/PyMcaGui/physics/xrf/XRFMCPyMca.py b/PyMca5/PyMcaGui/physics/xrf/XRFMCPyMca.py index d19a8eb0f..a0cfc09fd 100644 --- a/PyMca5/PyMcaGui/physics/xrf/XRFMCPyMca.py +++ b/PyMca5/PyMcaGui/physics/xrf/XRFMCPyMca.py @@ -805,7 +805,7 @@ def _start(self): #perform a dummy fit till xmimsim-pymca is upgraded if 0: import numpy - from PyMca import ClassMcaTheory + from PyMca import LegacyMcaTheory newFile['fit']['linearfitflag']=1 newFile['fit']['stripflag']=0 newFile['fit']['stripiterations']=0 @@ -814,7 +814,7 @@ def _start(self): #xdata = numpy.arange(xmin, xmax + 1) * 1.0 xdata = numpy.arange(0, xmax + 1) * 1.0 ydata = 0.0 + 0.1 * xdata - mcaFit = ClassMcaTheory.McaTheory() + mcaFit = LegacyMcaTheory.McaTheory() mcaFit.configure(newFile) mcaFit.setData(x=xdata, y=ydata, xmin=xmin, xmax=xmax) mcaFit.estimate() diff --git a/PyMca5/PyMcaMath/fitting/model/CachingLinkedModel.py b/PyMca5/PyMcaMath/fitting/model/CachingLinkedModel.py new file mode 100644 index 000000000..0af8b59eb --- /dev/null +++ b/PyMca5/PyMcaMath/fitting/model/CachingLinkedModel.py @@ -0,0 +1,53 @@ +import numpy + +from .LinkedModel import LinkedModel +from .CachingModel import CachedPropertiesModel + + +class CachedPropertiesLinkModel(CachedPropertiesModel, LinkedModel): + def _iter_cached_property_ids(self, **cacheoptions): + if self.is_linked and self._in_property_caching_context(): + # Yield only the property id's that belong to this model + names = self._cached_property_names() + for propid in super()._iter_cached_property_ids(**cacheoptions): + if propid.property_name in names: + yield propid + else: + yield from super()._iter_cached_property_ids(**cacheoptions) + + def _get_property_values(self, **cacheoptions): + values = super()._get_property_values(**cacheoptions) + return self.__extract_values(values, **cacheoptions) + + def _set_property_values(self, values, **cacheoptions): + values = self.__insert_values(values, **cacheoptions) + super()._set_property_values(values, **cacheoptions) + + def __extract_values(self, values, **cacheoptions): + if not self.is_linked: + return values + # Only the values that belong to this model + data = list() + for propid in self._iter_cached_property_ids(**cacheoptions): + index = self._propid_to_index(propid, **cacheoptions) + data.append(numpy.atleast_1d(values[index]).tolist()) + + return numpy.concatenate(data) + + def __insert_values(self, values, **cacheoptions): + if not self.is_linked: + return values + gvalues = super()._get_property_values(**cacheoptions) + i = 0 + for propid in self._iter_cached_property_ids(**cacheoptions): + index = self._propid_to_index(propid, **cacheoptions) + try: + n = len(gvalues[index]) + except TypeError: + n = 1 + if n == 1: + gvalues[index] = values[i] + else: + gvalues[index] = values[i : i + n] + i += n + return gvalues diff --git a/PyMca5/PyMcaMath/fitting/model/CachingModel.py b/PyMca5/PyMcaMath/fitting/model/CachingModel.py new file mode 100644 index 000000000..070d15b29 --- /dev/null +++ b/PyMca5/PyMcaMath/fitting/model/CachingModel.py @@ -0,0 +1,299 @@ 
+import functools +from contextlib import contextmanager + +from .PropertyUtils import wrapped_property + + +class CacheManager: + """Object that manages a cache""" + + def __init__(self, *args, **kw): + self._cache_root = dict() + super().__init__(*args, **kw) + + def _create_empty_property_values_cache(self, key, **cacheoptions): + # By default the property cache is a dictionary + return dict() + + def _property_index_from_id(self, propid, **cacheoptions): + # By default the property cache index is its propid + return propid + + def _property_cache_key(self, **cacheoptions): + # By default we only manage 1 cache (None) + return None + + +class CachingModel(CacheManager): + """Model that manages and uses an internal cache (default) + or uses an external cache. + """ + + def __init__(self, *args, **kw): + super().__init__(*args, **kw) + self._cache_manager = None + + @property + def _cache_manager(self): + if self.__external_cache_manager is None: + return self + else: + return self.__external_cache_manager + + @_cache_manager.setter + def _cache_manager(self, obj): + if obj is not None and not isinstance(obj, CacheManager): + raise TypeError(obj, type(obj)) + self.__external_cache_manager = obj + + @contextmanager + def _cachingContext(self, cachename): + cache_root = self._cache_manager._cache_root + new_context_entry = cachename not in cache_root + if new_context_entry: + cache_root[cachename] = dict() + try: + yield cache_root[cachename] + finally: + if new_context_entry: + del cache_root[cachename] + + def _cachingEnabled(self, cachename): + return cachename in self._cache_manager._cache_root + + def _getCache(self, cachename, *subnames): + cache_root = self._cache_manager._cache_root + if cachename not in cache_root: + return None + ret = cache_root[cachename] + for cachename in subnames: + try: + ret = ret[cachename] + except KeyError: + ret[cachename] = dict() + ret = ret[cachename] + return ret + + +class cached_property(wrapped_property): + """Property getter/setter may get/set from + a cache when enabled. 
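+
+    A minimal usage sketch (hypothetical ``MyModel``; inside
+    ``_propertyCachingContext`` the setter writes to the cache, not to
+    the instance):
+
+    .. code-block:: python
+
+        class MyModel(CachedPropertiesModel):
+            def __init__(self):
+                super().__init__()
+                self._x = 0.0
+
+            @cached_property
+            def x(self):
+                return self._x
+
+            @x.setter
+            def x(self, value):
+                self._x = value
+
+        model = MyModel()
+        with model._propertyCachingContext():
+            model.x = 5.0  # written to the cache only
+        model.x  # 0.0 again: the non-persisted cache was discarded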
+ """ + + def _wrap_getter(self, fget): + @functools.wraps(fget) + def wrapper(oself, nocache=False): + if nocache: + return fget(oself) + else: + return oself._cached_property_fget(fget) + + return super()._wrap_getter(wrapper) + + def _wrap_setter(self, fset): + @functools.wraps(fset) + def wrapper(oself, value, nocache=False): + if nocache: + return fset(oself, value) + else: + return oself._cached_property_fset(fset, value) + + return super()._wrap_setter(wrapper) + + +class CachedPropertiesModel(CachingModel): + """Model that implements cached properties""" + + _CACHED_PROPERTIES = tuple() + + def __init_subclass__(subcls, **kwargs): + super().__init_subclass__(**kwargs) + allp = list(subcls._CACHED_PROPERTIES) + for name, attr in vars(subcls).items(): + if isinstance(attr, cached_property) and name not in allp: + allp.append(name) + subcls._CACHED_PROPERTIES = tuple(sorted(allp)) + + @classmethod + def _cached_property_names(cls): + """All property names for this class""" + return cls._CACHED_PROPERTIES + + def _instance_cached_property_ids(self, **cacheoptions): + """All property id's for this instance and the provided cache options""" + return self._cached_property_names() + + @contextmanager + def _propertyCachingContext(self, persist=False, start_cache=None, **cacheoptions): + values_cache = self._get_property_values_cache(**cacheoptions) + if values_cache is not None: + # Re-entering this context should not affect anything + yield values_cache + return + + with self._cachingContext("_propid"): + if start_cache is None: + values_cache = self._create_start_property_values_cache(**cacheoptions) + else: + values_cache = start_cache + + with self._cachingContext("_property_values"): + self._set_property_values_cache(values_cache, **cacheoptions) + yield values_cache + + if persist: + self._persist_property_values(values_cache, **cacheoptions) + + def _in_property_caching_context(self): + return self._getCache("_property_values") is not None + + def _get_property_values_cache(self, **cacheoptions): + caches = self._getCache("_property_values") + if caches is None: + return None + key = self._cache_manager._property_cache_key(**cacheoptions) + return caches.get(key, None) + + def _set_property_values_cache(self, values_cache, **cacheoptions): + caches = self._getCache("_property_values") + if caches is None: + return False + key = self._cache_manager._property_cache_key(**cacheoptions) + cache = caches.get(key, None) + if cache is None: + caches[key] = values_cache + else: + cache[:] = values_cache + return True + + def _create_start_property_values_cache(self, **cacheoptions): + """Fill an empty cache with property values""" + _cache_manager = self._cache_manager + key = _cache_manager._property_cache_key(**cacheoptions) + values_cache = _cache_manager._create_empty_property_values_cache( + key, **cacheoptions + ) + for propid in self._iter_cached_property_ids(**cacheoptions): + index = self._propid_to_index(propid, **cacheoptions) + values_cache[index] = self._get_noncached_property_value(propid) + return values_cache + + def _get_property_values(self, **cacheoptions): + """Get the cache when enabled, get instance property values when disabled""" + values = self._get_property_values_cache(**cacheoptions) + if values is None: + return self._create_start_property_values_cache(**cacheoptions) + return values + + def _set_property_values(self, values, **cacheoptions): + """Set the cache when enabled, set instance property values when disabled""" + success = 
self._set_property_values_cache(values, **cacheoptions) + if not success: + self._persist_property_values(values, **cacheoptions) + + def _persist_property_values(self, values, **cacheoptions): + for propid in self._iter_cached_property_ids(**cacheoptions): + index = self._propid_to_index(propid, **cacheoptions) + self._set_noncached_property_value(propid, values[index]) + + def _get_property_value(self, propid, **cacheoptions): + """Get the value from the cache or from the property""" + values_cache = self._get_property_values_cache(**cacheoptions) + if values_cache is None: + return self._get_noncached_property_value(propid) + index = self._propid_to_index(propid, **cacheoptions) + return values_cache[index] + + def _get_noncached_property_value(self, propid): + name = self._property_name_from_id(propid) + return getattr(self, name) + + def _set_property_value(self, propid, value, **cacheoptions): + """Set the value in the cache or the property""" + values_cache = self._get_property_values_cache(**cacheoptions) + if values_cache is None: + return self._set_noncached_property_value(propid, value) + index = self._propid_to_index(propid, **cacheoptions) + values_cache[index] = value + + def _set_noncached_property_value(self, propid, value): + name = self._property_name_from_id(propid) + setattr(self, name, value) + + def _cached_property_fget(self, fget): + """Same as _get_property_value but we have the property object + instead of the propid + """ + values_cache = self._get_property_values_cache() + propid = self._name_to_propid(fget.__name__) + if values_cache is None or propid is None: + return fget(self) + index = self._propid_to_index(propid) + return values_cache[index] + + def _cached_property_fset(self, fset, value): + """Same as _set_property_value but we have the property object + instead of the propid + """ + values_cache = self._get_property_values_cache() + propid = self._name_to_propid(fset.__name__) + if values_cache is None or propid is None: + return fset(self, value) + index = self._propid_to_index(propid) + values_cache[index] = value + + def _get_from_propid_cache(self, *subnames, dtype=dict, **cacheoptions): + """Returns the cache when propid caching is enabled, potentially initialized + by instantiating `dtype`. Returns `None` when propid caching is disabled. + """ + caches = self._getCache("_propid", *subnames) + if caches is None: + return None + key = self._cache_manager._property_cache_key(**cacheoptions) + return caches.setdefault(key, dtype()) + + def _iter_cached_property_ids(self, **cacheoptions): + """To be used when iterating over all property id's + of this instance. 
+ """ + propid_list = self._get_from_propid_cache( + "_propid_list", dtype=list, **cacheoptions + ) + if propid_list is None: + yield from self._instance_cached_property_ids(**cacheoptions) + return + if not propid_list: + propid_list.extend(self._instance_cached_property_ids(**cacheoptions)) + yield from propid_list + + def _propid_to_index(self, propid, **cacheoptions): + propid_to_index = self._get_from_propid_cache( + "_propid_to_index", dtype=dict, **cacheoptions + ) + if propid_to_index is None: + return self._cache_manager._property_index_from_id(propid, **cacheoptions) + if propid in propid_to_index: + return propid_to_index[propid] + index = self._cache_manager._property_index_from_id(propid, **cacheoptions) + propid_to_index[propid] = index + return index + + def _name_to_propid(self, property_name, **cacheoptions): + return self._property_id_from_name(property_name) + # TODO: duplicate names for linked models + name_to_propid = self._get_from_propid_cache( + "_name_to_propid", dtype=dict, **cacheoptions + ) + if name_to_propid is None: + return self._property_id_from_name(property_name) + if property_name in name_to_propid: + return name_to_propid[property_name] + propid = self._property_id_from_name(property_name) + name_to_propid[property_name] = propid + return propid + + def _property_id_from_name(self, property_name): + return property_name + + def _property_name_from_id(self, propid): + return propid diff --git a/PyMca5/PyMcaMath/fitting/model/LeastSquaresFitModel.py b/PyMca5/PyMcaMath/fitting/model/LeastSquaresFitModel.py new file mode 100644 index 000000000..a5dee7a05 --- /dev/null +++ b/PyMca5/PyMcaMath/fitting/model/LeastSquaresFitModel.py @@ -0,0 +1,828 @@ +from contextlib import contextmanager, ExitStack +import numpy + +from PyMca5.PyMcaMath.linalg import lstsq +from PyMca5.PyMcaMath.fitting import Gefit + +from .ParameterModel import ParameterModelBase +from .ParameterModel import ParameterModel +from .ParameterModel import ParameterModelManager +from .ParameterModel import ParameterType + + +class LeastSquaresFitModelInterface: + """All classes derived from `LeastSquaresFitModel` must implement + this interface. + """ + + @property + def xdata(self): + raise NotImplementedError + + @xdata.setter + def xdata(self, value): + raise NotImplementedError + + @property + def ydata(self): + raise NotImplementedError + + @ydata.setter + def ydata(self, value): + raise NotImplementedError + + @property + def ystd(self): + return None + + @ystd.setter + def ystd(self, value): + raise NotImplementedError + + def evaluate_fitmodel(self, xdata=None): + """Evaluate the fit model. + + :param array xdata: shape (ndata,) + :returns array: shape (ndata,) + """ + raise NotImplementedError + + def derivative_fitmodel(self, param_idx, xdata=None, **paramtype): + """Derivate to a specific parameter of the fit model. + + Only required when you want to implement analytical derivatives + of the fit model. Numerical derivatives are used by default. + + Note that the numerical derivatives for non-linear parameters + are approximations. They are exact with arithmetic precission + for linear parameters. 
+ + :param int param_idx: + :param array xdata: shape (ndata,) + :returns array: shape (ndata,) + """ + raise NotImplementedError + + def non_leastsquares_increment(self): + """Only required when niter_non_leastsquares > 0""" + raise NotImplementedError + + +class LeastSquaresFitModelBase(LeastSquaresFitModelInterface, ParameterModelBase): + """A parameter model with least-squares optimization""" + + @property + def ndata(self): + raise NotImplementedError + + @property + def yfitdata(self): + raise NotImplementedError + + @property + def yfitstd(self): + raise NotImplementedError + + @property + def yfit_weighted_residuals(self): + return (self.yfitdata - self.yfitmodel) / self.yfitstd + + @property + def chi_squared(self): + return (self.yfit_weighted_residuals ** 2).sum() + + @property + def reduced_chi_squared(self): + return self.chi_squared / self.degrees_of_freedom + + @property + def nfree_parameters(self): + constraints = self.get_parameter_constraints() + return numpy.sum(constraints[:, 0] < 3, dtype=int) + + @property + def degrees_of_freedom(self): + return self.ndata - self.nfree_parameters + + def evaluate_fullmodel(self, xdata=None): + """Evaluate the full model. + + :param array xdata: shape (ndata,) + :returns array: shape (ndata,) + """ + raise NotImplementedError + + def evaluate_decomposed_fullmodel(self, xdata=None): + """Evaluate the full model. + + :param array xdata: shape (ndata,) + :returns array: n x ndata + """ + raise NotImplementedError + + def evaluate_decomposed_fitmodel(self, xdata=None): + """Evaluate the fit model. + + :param array xdata: shape (ndata,) + :returns array: shape (ndata,) + """ + derivatives = self.independent_linear_derivatives_fitmodel(xdata=xdata) + parameters = self.get_parameter_values( + parameter_types=ParameterType.independent_linear + ) + return parameters.dot(derivatives) + + def independent_linear_derivatives_fitmodel(self, xdata=None): + """Derivates to all independent linear parameters + + :param array xdata: shape (ndata,) + :returns array: shape (nparams, ndata) + """ + nparams = self.get_n_parameters( + parameter_types=ParameterType.independent_linear + ) + return numpy.array( + [ + self.derivative_fitmodel( + i, xdata=xdata, parameter_types=ParameterType.independent_linear + ) + for i in range(nparams) + ] + ) + + def numerical_derivative_fitmodel(self, param_idx, xdata=None, **paramtype): + """Derivate to a specific parameter of the fit model. + + :param int param_idx: + :param array xdata: shape (ndata,) + :returns array: shape (ndata,) + """ + raise NotImplementedError + + def compare_derivatives(self, xdata=None, **paramtype): + """Compare analytical and numerical derivatives. Useful to + validate the user defined `derivative_fitmodel`. + + :yields str, array, array: parameter name, analytical, numerical + """ + for param_idx, name in enumerate(self.get_parameter_names(**paramtype)): + ycalderiv = self.derivative_fitmodel(param_idx, xdata=xdata, **paramtype) + ynumderiv = self.numerical_derivative_fitmodel( + param_idx, xdata=xdata, **paramtype + ) + yield name, ycalderiv, ynumderiv + + def linear_decomposition_fitmodel(self, xdata=None): + """Linear decomposition of the fit model. 
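+
+        For a model that is affine in its independent linear parameters,
+        y(x) = p0*f0(x) + ... + pn*fn(x), this returns the individual
+        terms pi*fi(x) instead of their sum.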
+
+        :param array xdata: shape (ndata,)
+        :returns array: nparams x ndata
+        """
+        derivatives = self.independent_linear_derivatives_fitmodel(xdata=xdata)
+        parameters = self.get_parameter_values(
+            parameter_types=ParameterType.independent_linear
+        )
+        return parameters[:, numpy.newaxis] * derivatives
+
+    @property
+    def yfullmodel(self):
+        """Model of ydata"""
+        return self.evaluate_fullmodel()
+
+    @property
+    def yfitmodel(self):
+        """Model of yfitdata"""
+        return self.evaluate_fitmodel()
+
+    def fit(self, full_output=False):
+        """
+        :param bool full_output: add statistics to fitted parameters
+        :returns dict:
+        """
+        if self.parameter_types == ParameterType.independent_linear:
+            return self.non_iterative_optimization(full_output=full_output)
+        else:
+            return self.iterative_optimization(full_output=full_output)
+
+    def non_iterative_optimization(self, full_output=False):
+        """
+        :param bool full_output: add statistics to fitted parameters
+        :returns dict:
+        """
+        with self._non_iterative_optimization_context():
+            b = self.yfitdata
+            sigma_b = self.yfitstd
+            for i in range(max(self.niter_non_leastsquares, 1)):
+                A = self.independent_linear_derivatives_fitmodel()
+                result = lstsq(
+                    A.T,  # ndata, nparams
+                    b.copy(),  # ndata
+                    uncertainties=True,
+                    covariances=False,
+                    digested_output=True,
+                    sigma_b=sigma_b,
+                    weight=self.weightflag,
+                )
+                if self.niter_non_leastsquares:
+                    self.set_parameter_values(result["parameters"])
+                    self.non_leastsquares_increment()
+            result["parameter_types"] = self.parameter_types
+            result["chi2_red"] = numpy.nan
+            result.pop("svd")
+            if full_output:
+                result["niter"] = 1
+                result["lastdeltachi"] = numpy.nan
+            return result
+
+    def iterative_optimization(self, full_output=False):
+        """
+        :param bool full_output: add statistics to fitted parameters
+        :returns dict:
+        """
+        with self._iterative_optimization_context():
+            constraints = self.get_parameter_constraints()
+            xdata = self.xdata
+            ydata = self.yfitdata
+            ystd = self.yfitstd
+            for i in range(max(self.niter_non_leastsquares, 1)):
+                parameters = self.get_parameter_values()
+                try:
+                    result = Gefit.LeastSquaresFit(
+                        self._gefit_evaluate_fitmodel,
+                        parameters,
+                        model_deriv=self._gefit_derivative_fitmodel,
+                        xdata=xdata,
+                        ydata=ydata,
+                        sigmadata=ystd,
+                        constrains=constraints.T,
+                        maxiter=self.maxiter,
+                        weightflag=self.weightflag,
+                        deltachi=self.deltachi,
+                        fulloutput=full_output,
+                        linear=self.only_linear_parameters,
+                    )
+                except numpy.linalg.LinAlgError as e:
+                    if "singular matrix" in str(e).lower():
+                        reason = self.__singular_matrix_reason(
+                            xdata, parameters, constraints
+                        )
+                        if reason:
+                            raise RuntimeError(reason) from e
+                    raise
+                if self.niter_non_leastsquares:
+                    self.set_parameter_values(result[0])
+                    self.non_leastsquares_increment()
+            ret = {
+                "parameter_types": self.parameter_types,
+                "parameters": result[0],
+                "uncertainties": result[2],
+                "chi2_red": result[1],
+            }
+            if full_output:
+                ret["niter"] = result[3]
+                ret["lastdeltachi"] = result[4]
+            return ret
+
+    def __singular_matrix_reason(self, xdata, parameters, constraints):
+        fitted = constraints[:, 0] != Gefit.CFIXED
+        fitted_indices = fitted.nonzero()[0].tolist()
+        derivatives = numpy.vstack(
+            [
+                self._gefit_derivative_fitmodel(parameters, i, xdata)
+                for i in fitted_indices
+            ]
+        )
+        names = numpy.array(self.get_parameter_names())
+        names = names[fitted_indices].tolist()
+
+        allzeros = (numpy.abs(derivatives) < 1e-10).all(axis=1)
+        if allzeros.any():
+            zeronames = [name for name, iszero in zip(names, allzeros) if iszero]
+            return "Derivatives of {} are zero".format(zeronames)
+
+        for i1, deriv1 in enumerate(derivatives):
+            for i2, deriv2 in enumerate(derivatives):
+                if i1 == i2:
+                    continue
+                if numpy.allclose(deriv1, deriv2):
+                    return "Derivatives of '{}' and '{}' are equal".format(
+                        names[i1], names[i2]
+                    )
+
+    @property
+    def maxiter(self):
+        return 100
+
+    @property
+    def deltachi(self):
+        return None
+
+    @property
+    def weightflag(self):
+        return 0
+
+    @property
+    def niter_non_leastsquares(self):
+        return 0
+
+    @contextmanager
+    def _non_iterative_optimization_context(self):
+        with ExitStack() as stack:
+            ctx = self.parameter_types_context(ParameterType.independent_linear)
+            stack.enter_context(ctx)
+            ctx = self._propertyCachingContext()
+            stack.enter_context(ctx)
+            ctx = self._custom_non_iterative_optimization_context()
+            stack.enter_context(ctx)
+            yield
+
+    @contextmanager
+    def _iterative_optimization_context(self):
+        with ExitStack() as stack:
+            ctx = self._propertyCachingContext()
+            stack.enter_context(ctx)
+            ctx = self._custom_iterative_optimization_context()
+            stack.enter_context(ctx)
+            yield
+
+    @contextmanager
+    def _custom_non_iterative_optimization_context(self):
+        """To allow derived classes to add context"""
+        yield
+
+    @contextmanager
+    def _custom_iterative_optimization_context(self):
+        """To allow derived classes to add context"""
+        yield
+
+    def _gefit_evaluate_fitmodel(self, parameters, xdata):
+        """Update parameters and evaluate the model
+
+        :param array parameters: shape (nparams,)
+        :param array xdata: shape (ndata,)
+        :returns array: shape (ndata,)
+        """
+        self.set_parameter_values(parameters)
+        return self.evaluate_fitmodel(xdata=xdata)
+
+    def _gefit_derivative_fitmodel(self, parameters, param_idx, xdata):
+        """Update parameters and return the derivative with respect to a
+        specific parameter
+
+        :param array parameters: shape (nparams,)
+        :param int param_idx:
+        :param array xdata: shape (ndata,)
+        :returns array: shape (ndata,)
+        """
+        self.set_parameter_values(parameters)
+        return self.derivative_fitmodel(param_idx, xdata=xdata)
+
+    def use_fit_result(self, result):
+        """
+        :param dict result:
+        """
+        self.set_parameter_values(
+            result["parameters"], parameter_types=result["parameter_types"]
+        )
+
+    @contextmanager
+    def use_fit_result_context(self, result):
+        """Changes the parameters only for the duration of this context
+
+        :param dict result:
+        """
+        with self.parameter_types_context(result["parameter_types"]):
+            with self._propertyCachingContext():
+                self.use_fit_result(result)
+                yield
+
+
+class LeastSquaresFitModel(LeastSquaresFitModelBase, ParameterModel):
+    """A least-squares parameter model
+
+    Derived classes:
+
+    * implement the LeastSquaresFitModelInterface.
+    * add fit parameters as Python properties by using the
+      `nonlinear_parameter_group` or `independent_linear_parameter_group`
+      decorators instead of the `property` decorator.
+
+    There is a "fit model" and a "full model". The full model describes
+    the data, the fit model describes the pre-processed data (for example
+    smoothed, background subtracted, ...). By default the full model and
+    the fit model are identical.
+
+    Fitting is done with linear least-squares optimization (non-iterative)
+    or non-linear least-squares optimization (iterative). An outer loop of
+    non-least-squares optimization can be enabled for either (iterative).
+
+    Example:
+
+    .. code-block:: python
+
+        model = MyModel()
+        model.xdata = xdata
+        model.ydata = ydata
+        model.ystd = ydata**0.5
+
+        plt.plot(model.xdata, model.ydata, label="data")
+        plt.plot(model.xdata, model.yfullmodel, label="initial")
+
+        result = model.fit()
+        model.use_fit_result(result)
+
+        plt.plot(model.xdata, model.yfullmodel, label="fit")
+    """
+
+    @property
+    def ndata(self):
+        return len(self.xdata)
+
+    @property
+    def yfitdata(self):
+        return self._y_full_to_fit(self.ydata)
+
+    @property
+    def yfitstd(self):
+        return self._ystd_full_to_fit(self.ystd)
+
+    def _y_full_to_fit(self, y, xdata=None):
+        """Convert data from full model to fit model"""
+        return y
+
+    def _ystd_full_to_fit(self, ystd, xdata=None):
+        """Convert standard deviation from full model to fit model"""
+        return ystd
+
+    def _y_fit_to_full(self, y, xdata=None):
+        """Convert data from fit model to full model"""
+        return y
+
+    def evaluate_fullmodel(self, xdata=None):
+        """Evaluate the full model.
+
+        :param array xdata: shape (ndata,)
+        :returns array: shape (ndata,)
+        """
+        y = self.evaluate_fitmodel(xdata=xdata)
+        return self._y_fit_to_full(y, xdata=xdata)
+
+    def evaluate_decomposed_fullmodel(self, xdata=None):
+        """Evaluate the full model.
+
+        :param array xdata: shape (ndata,)
+        :returns array: shape (ndata,)
+        """
+        y = self.evaluate_decomposed_fitmodel(xdata=xdata)
+        return self._y_fit_to_full(y, xdata=xdata)
+
+    def derivative_fitmodel(self, param_idx, xdata=None, **paramtype):
+        """Derivative with respect to a specific parameter of the fit model.
+
+        :param int param_idx:
+        :param array xdata: shape (ndata,)
+        :returns array: shape (ndata,)
+        """
+        return self.numerical_derivative_fitmodel(param_idx, xdata=xdata, **paramtype)
+
+    def numerical_derivative_fitmodel(self, param_idx, xdata=None, **paramtype):
+        """Numerical derivative with respect to a specific parameter of the
+        fit model.
+
+        :param int param_idx:
+        :param array xdata: shape (ndata,)
+        :returns array: shape (ndata,)
+        """
+        group = self._group_from_parameter_index(param_idx, **paramtype)
+        parameters = self.get_parameter_values(**paramtype)
+        try:
+            if group.is_independent_linear:
+                return self._numerical_derivative_independent_linear_param(
+                    parameters, param_idx, xdata=xdata, **paramtype
+                )
+            else:
+                return self._numerical_derivative_param(
+                    parameters, param_idx, xdata=xdata, **paramtype
+                )
+        finally:
+            self.set_parameter_values(parameters, **paramtype)
+
+    def _numerical_derivative_independent_linear_param(
+        self, parameters, param_idx, xdata=None, **paramtype
+    ):
+        """The numerical derivative with respect to an independent linear
+        parameter is exact within arithmetic precision.
+        """
+        # y(x) = p0*f0(x) + ... + pi*fi(x) + ...
+        # dy/dpi(x) = fi(x)
+        parameters = parameters.copy()
+        for group in self._iter_parameter_groups(**paramtype):
+            if group.is_independent_linear:
+                parameters[group.index] = 0
+        parameters[param_idx] = 1
+        self.set_parameter_values(parameters, **paramtype)
+        return self.evaluate_fitmodel(xdata=xdata)
+
+    def _numerical_derivative_param(
+        self, parameters, param_idx, xdata=None, **paramtype
+    ):
+        """The numerical derivative with respect to a non-linear or
+        dependent-linear parameter is an approximation.
+        """
+        # Choose delta to be a small fraction of the parameter value but
+        # not too small, otherwise the derivative is zero.
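+        # Central difference approximation:
+        #     dy/dp_i ~= (y(p_i + delta) - y(p_i - delta)) / (2 * delta)
+        # The 1e-12 floor below keeps delta non-zero when the parameter is 0.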
+ p0 = parameters[param_idx] + delta = p0 * 1e-5 + if delta < 0: + delta = min(delta, -1e-12) + else: + delta = max(delta, 1e-12) + + parameters = parameters.copy() + parameters[param_idx] = p0 + delta + self.set_parameter_values(parameters, **paramtype) + f1 = self.evaluate_fitmodel(xdata=xdata) + + parameters[param_idx] = p0 - delta + self.set_parameter_values(parameters, **paramtype) + f2 = self.evaluate_fitmodel(xdata=xdata) + + return (f1 - f2) / (2.0 * delta) + + def compare_derivatives(self, xdata=None, exclude_fixed=False, **paramtype): + """Compare analytical and numerical derivatives. Useful to + validate the user defined `derivative_fitmodel`. + + :yields str, array, array: parameter name, analytical, numerical + """ + names = self.get_parameter_names(**paramtype) + constraints = self.get_parameter_constraints(**paramtype) + for param_idx, (name, constraint) in enumerate(zip(names, constraints)): + if exclude_fixed and constraint[0] == Gefit.CFIXED: + continue + ycalderiv = self.derivative_fitmodel(param_idx, xdata=xdata, **paramtype) + ynumderiv = self.numerical_derivative_fitmodel( + param_idx, xdata=xdata, **paramtype + ) + yield name, ycalderiv, ynumderiv + + +class LeastSquaresCombinedFitModel(LeastSquaresFitModelBase, ParameterModelManager): + """A least-squares parameter model which manages models that implement the fit model""" + + @property + def ndata(self): + return sum(model.ndata for model in self.models) + + @property + def xdata(self): + return self._get_concatenated_data("xdata") + + @xdata.setter + def xdata(self, values): + self._set_concatenated_data("xdata", values) + + @property + def ydata(self): + return self._get_concatenated_data("ydata") + + @ydata.setter + def ydata(self, values): + self._set_concatenated_data("ydata", values) + + @property + def ystd(self): + return self._get_concatenated_data("ystd") + + @ystd.setter + def ystd(self, values): + self._set_concatenated_data("ystd", values) + + @property + def yfitdata(self): + return self._get_concatenated_data("yfitdata") + + @property + def yfitstd(self): + return self._get_concatenated_data("yfitstd") + + def evaluate_fullmodel(self, xdata=None): + """Evaluate the full model. + + :param array xdata: shape (ndata,) or (nmodels, ndatai) + :returns array: shape (ndata,) or (sum(ndatai),) + """ + return self._concatenate_evaluation("evaluate_fullmodel", xdata=xdata) + + def evaluate_decomposed_fullmodel(self, xdata=None): + """Evaluate the full model. + + :param array xdata: shape (ndata,) or (nmodels, ndatai) + :returns array: shape (ndata,) or (sum(ndatai),) + """ + return self._concatenate_evaluation( + "evaluate_decomposed_fullmodel", xdata=xdata + ) + + def evaluate_fitmodel(self, xdata=None, _strided=False): + """Evaluate the fit model. + + :param array xdata: shape (ndata,) or (nmodels, ndatai) + :param bool _strided: + :returns array: shape (ndata,) or (sum(ndatai),) + """ + return self._concatenate_evaluation( + "evaluate_fitmodel", xdata=xdata, strided=_strided + ) + + def derivative_fitmodel(self, param_idx, xdata=None, _strided=False, **paramtype): + """Derivate to a specific parameter of the fit model. + + :param int param_idx: + :param array xdata: shape (ndata,) + :param bool _strided: + :returns array: shape (ndata,) + """ + return self._get_concatenated_derivative( + "derivative_fitmodel", param_idx, xdata=xdata, strided=_strided, **paramtype + ) + + def numerical_derivative_fitmodel(self, param_idx, xdata=None, **paramtype): + """Derivate to a specific parameter of the fit model. 
+ + :param int param_idx: + :param array xdata: shape (ndata,) + :returns array: shape (ndata,) + """ + return self._get_concatenated_derivative( + "numerical_derivative_fitmodel", param_idx, xdata=xdata, **paramtype + ) + + def _gefit_evaluate_fitmodel(self, parameters, xdata): + """Update parameters and evaluate model + + :param array parameters: shape (nparams,) + :param array xdata: shape (ndata,) + :returns array: shape (ndata,) + """ + self.set_parameter_values(parameters) + return self.evaluate_fitmodel(xdata=xdata, _strided=True) + + def _gefit_derivative_fitmodel(self, parameters, param_idx, xdata): + """Update parameters and return derivate to a specific parameter + + :param array parameters: shape (nparams,) + :param int param_idx: + :param array xdata: shape (ndata,) + :returns array: shape (ndata,) + """ + self.set_parameter_values(parameters) + return self.derivative_fitmodel(param_idx, xdata=xdata, _strided=True) + + def _get_concatenated_data(self, attr): + """ + :param str attr: + :returns array or None: + """ + if self.nmodels == 0: + return None + data = [getattr(model, attr) for model in self.models] + isnone = [d is None for d in data] + if any(isnone): + assert all(isnone) + return None + return numpy.concatenate(data) + + def _set_concatenated_data(self, attr, values): + """ + :param str attr: + :param array values: + """ + if len(values) != self.ndata: + raise ValueError("Not the expected number of channels") + for model, values, _ in self._iter_model_data_slices(values): + setattr(model, attr, values) + + def _concatenate_evaluation(self, funcname, xdata=None, strided=False): + """Evaluate model + + :param array xdata: shape (ndata,) or (nmodels, ndatai) + :param bool strided: + :returns array: shape (ndata,) or (sum(ndatai),) + """ + ret = numpy.empty(self._get_ndata(xdata, strided=strided)) + for model, xdata, idx in self._iter_model_data_slices(xdata, strided=strided): + func = getattr(model, funcname) + ret[idx] = func(xdata=xdata) + return ret + + def _get_concatenated_derivative( + self, funcname, param_idx, xdata=None, strided=False, **paramtype + ): + """Derivate to a specific parameter of the fit model. 
+ + :param int param_idx: + :param array xdata: shape (ndata,) + :param bool strided: + :returns array: shape (ndata,) + """ + n = self._get_ndata(xdata, strided=strided) + ret = numpy.zeros(n) + group = self._group_from_parameter_index(param_idx, **paramtype) + + param_idx0 = param_idx + cached = self._in_property_caching_context() + if not cached: + param_group_idx = group.parameter_index_in_group(param_idx0) + for modeli, xdata, idx in self._iter_model_data_slices(xdata, strided=strided): + if ( + not group.linked + and modeli._linked_instance_to_key != group.instance_key + ): + continue + if cached: + param_idx = param_idx0 + else: + groupi = modeli._group_from_parameter_name( + group.property_name, **paramtype + ) + param_idx = groupi.start_index + param_group_idx + func = getattr(modeli, funcname, funcname) + ret[idx] = func(param_idx, xdata=xdata, **paramtype) + return ret + + def _get_ndata(self, data, strided=False): + """ + :param array data: shape (ndata,) or (nmodels, ndatai) + :param bool strided: + :returns int: + """ + ndata = self.ndata + if data is None: + return ndata + elif strided: + return len(data) + elif len(data) == ndata: + return ndata + else: + if len(data) != self.nmodels: + raise ValueError(f"Expected {self.nmodels} data arrays") + return sum(len(x) for x in data) + + def _iter_model_data_slices(self, data, strided=False): + """ + :param array data: shape (ndata,) or (nmodels, ndatai) + :param bool strided: + :yields tuple: model, datai, slice + """ + i = 0 + if data is None: + for model in self.models: + n = model.ndata + yield model, model.xdata, slice(i, i + n) + i += n + elif len(data) == self.ndata: + for model in self.models: + n = model.ndata + idx = slice(i, i + n) + yield model, data[idx], idx + i += n + elif strided: + indices = self.__model_indices_after_slicing(data) + for model, idx in zip(self.models, indices): + yield model, data[idx], idx + else: + if len(data) != self.nmodels: + raise ValueError(f"Expected {self.nmodels} data arrays") + for model, xdata_model in zip(self.models, data): + n = len(xdata_model) + yield model, xdata_model, slice(i, i + n) + i += n + + def __model_indices_after_slicing(self, data): + ndata_models = [model.ndata for model in self.models] + stride, remain = divmod(sum(ndata_models), len(data)) + stride += remain > 0 + start = 0 + offset = 0 + i = 0 + for n in ndata_models: + # Index of model in concatenated xdata due to slicing + stop = start + n + lst = list(range(start + offset, stop, stride)) + nlst = len(lst) + # Index of model in concatenated xdata after slicing + idx = slice(i, i + nlst) + i += nlst + # Prepare for next model + if lst: + offset = lst[-1] + stride - stop + else: + offset -= n + start = stop + yield idx diff --git a/PyMca5/PyMcaMath/fitting/model/LinkedModel.py b/PyMca5/PyMcaMath/fitting/model/LinkedModel.py new file mode 100644 index 000000000..32cccc2a6 --- /dev/null +++ b/PyMca5/PyMcaMath/fitting/model/LinkedModel.py @@ -0,0 +1,226 @@ +import functools +from contextlib import ExitStack, contextmanager +from collections.abc import Mapping + +from .PropertyUtils import wrapped_property + + +class linked_property(wrapped_property): + """Setting a linked property of one object + will set that property for all linked objects + """ + + def __init__(self, *args, **kw): + self.propagate = False + super().__init__(*args, **kw) + + def _wrap_setter(self, fset): + propname = fset.__name__ + + @functools.wraps(fset) + def wrapper(oself, value): + fset(oself, value) + if not self.propagate: + return + for 
instance in oself._filter_class_has_linked_property( + oself._non_propagating_instances, propname + ): + setattr(instance, propname, value) + + return super()._wrap_setter(wrapper) + + +def linked_contextmanager(method): + """Entering the context manager of one object + will enter the context manager of linked objects + """ + context_name = method.__name__ + ctxmethod = contextmanager(method) + + @functools.wraps(method) + def wrapper(self, *args, **kw): + with ExitStack() as stack: + ctx = ctxmethod(self, *args, **kw) + stack.enter_context(ctx) + for instance in self._non_propagating_instances: + ctxmgr = getattr(instance, context_name) + if ctxmgr is not None: + ctx = ctxmgr(*args, **kw) + stack.enter_context(ctx) + yield + + return contextmanager(wrapper) + + +class LinkedModel: + """Model with properties and context's that are linked to other + LinkedModel instances. + """ + + def __init__(self, *args, **kw): + self._link_manager = None + self.__propagate = True + super().__init__(*args, **kw) + + @classmethod + def _get_linked_property(cls, prop_name): + prop = getattr(cls, prop_name, None) + if isinstance(prop, linked_property): + return prop + return None + + @classmethod + def _has_linked_property(cls, prop_name): + return cls._get_linked_property(prop_name) is not None + + @classmethod + def _property_is_linked(cls, prop_name): + prop = cls._get_linked_property(prop_name) + if prop is None: + return None + return prop.propagate + + @classmethod + def _disable_property_link(cls, *prop_names): + for prop_name in prop_names: + prop = cls._get_linked_property(prop_name) + if prop is not None: + prop.propagate = False + + @classmethod + def _enable_property_link(cls, *prop_names): + for prop_name in prop_names: + prop = cls._get_linked_property(prop_name) + if prop is not None: + prop.propagate = True + + @property + def is_linked(self): + return self._link_manager is not None + + @property + def _link_manager(self): + return self.__link_manager + + @_link_manager.setter + def _link_manager(self, obj): + if obj is not None and not isinstance(obj, LinkedModelManager): + raise TypeError(obj, type(obj)) + self.__link_manager = obj + + @property + def _linked_instances(self): + if self._link_manager is None: + return + for instance in self._link_manager._linked_instances: + if instance is not self: + yield instance + + @property + def _linked_instance_to_key(self): + if self._link_manager is None: + return None + return self._link_manager._linked_instance_to_key(self) + + @property + def _non_propagating_instances(self): + if not self.__propagate: + return + for instance in self._linked_instances: + with instance._disable_propagation(): + yield instance + + @contextmanager + def _disable_propagation(self): + keep = self.__propagate + self.__propagate = False + try: + yield + finally: + self.__propagate = keep + + @staticmethod + def _filter_class_has_linked_property(instances, prop_name): + for instance in instances: + if instance._has_linked_property(prop_name): + yield instance + + +class LinkedModelManager: + """Model that manages linked LinkedModel objects""" + + def __init__(self, linked_instances=None, *args, **kw): + super().__init__(*args, **kw) + self._linked_instance_mapping = linked_instances + + @property + def _linked_instances(self): + return self.__linked_instance_mapping.values() + + @property + def _linked_instance_mapping(self): + return self.__linked_instance_mapping + + @_linked_instance_mapping.setter + def _linked_instance_mapping(self, instances): + if instances is 
None: + instances = dict() + elif not isinstance(instances, Mapping): + raise TypeError(instances, "Linked instance must be a 'Mapping'") + for instance in instances.values(): + if not isinstance(instance, LinkedModel): + raise TypeError( + instance, "Linked instance must be of type 'LinkedModel'" + ) + self.__linked_instance_mapping = instances + for instance in instances.values(): + instance._link_manager = self + + def _linked_instance_to_key(self, instance): + for name, _instance in self._linked_instance_mapping.items(): + if _instance is instance: + return name + return None + + def _linked_key_to_instance(self, name): + return self._linked_instance_mapping[name] + + def _instances_with_linked_property(self, prop_name): + yield from LinkedModel._filter_class_has_linked_property( + self._linked_instances, prop_name + ) + + def _instance_with_linked_property(self, prop_name): + for instance in self._instances_with_linked_property(prop_name): + return instance + return None + + def _get_linked_property(self, prop_name): + instance = self._instance_with_linked_property(prop_name) + if instance is None: + raise ValueError(f"No instance has linked property {repr(prop_name)}") + return getattr(type(instance), prop_name) + + def _get_linked_property_value(self, prop_name): + instance = self._instance_with_linked_property(prop_name) + if instance is None: + raise ValueError(f"No instance has linked property {repr(prop_name)}") + return getattr(instance, prop_name) + + def _set_linked_property_value(self, prop_name, value): + instance = self._instance_with_linked_property(prop_name) + if instance is None: + raise ValueError(f"No instance has linked property {repr(prop_name)}") + setattr(instance, prop_name, value) + + def _disable_property_link(self, *names): + for name in names: + for instance in self._instances_with_linked_property(name): + instance._disable_property_link(name) + + def _enable_property_link(self, *names): + for name in names: + value = self._get_linked_property_value(name) + for instance in self._instances_with_linked_property(name): + instance._enable_property_link(name) + self._set_linked_property_value(name, value) diff --git a/PyMca5/PyMcaMath/fitting/model/ParameterModel.py b/PyMca5/PyMcaMath/fitting/model/ParameterModel.py new file mode 100644 index 000000000..802877518 --- /dev/null +++ b/PyMca5/PyMcaMath/fitting/model/ParameterModel.py @@ -0,0 +1,433 @@ +from typing import Any +from dataclasses import dataclass, field +from contextlib import contextmanager +import numpy +from enum import Flag + +from .CachingLinkedModel import CachedPropertiesLinkModel +from .LinkedModel import LinkedModelManager +from .LinkedModel import linked_property +from .CachingModel import CachedPropertiesModel +from .CachingModel import cached_property + + +ParameterType = Flag("ParameterType", "non_linear dependent_linear independent_linear") +LinearParameterTypes = ParameterType.dependent_linear | ParameterType.independent_linear +AllParameterTypes = ( + ParameterType.non_linear + | ParameterType.dependent_linear + | ParameterType.independent_linear +) + + +class _parameter_group(cached_property, linked_property): + """Specify a getter and setter for a group of fit parameters. + The counter and constraints are optional. When the counter + returns zero, the variable is excluded from the fit parameters. + Use CFIXED in the constraints if the parameters should be + included but not optimized. + + Usage: + + .. highlight:: python + .. 
code-block:: python + + class MyClass(Model): + + def __init__(self): + self._myparam = 0. + + @nonlinear_parameter_group + def myparam(self): + return self._myparam + + @myparam.setter # optional + def myparam(self, value): + self._myparam = value + + @myparam.counter # optional + def myparam(self): + return 1 + + @myparam.constraints # optional + def myparam(self): + return 1, 0, 0 + """ + + TYPE = NotImplemented + + def __init__(self, *args, **kw): + self.fcount = self._fcount_default() + self.fconstraints = self._fconstraints_default() + super().__init__(*args, **kw) + + def counter(self, fcount): + self.fcount = fcount + return self + + def constraints(self, fconstraints): + self.fconstraints = fconstraints + return self + + def _fcount_default(self): + def fcount(oself): + values = self.fget(oself, nocache=True) + try: + return len(values) + except TypeError: + return 1 + + return fcount + + def _fconstraints_default(self): + def fconstraints(oself): + return numpy.zeros((self.fcount(oself), 3)) + + return fconstraints + + +class nonlinear_parameter_group(_parameter_group): + TYPE = ParameterType.non_linear + + +class dependent_linear_parameter_group(_parameter_group): + TYPE = ParameterType.dependent_linear + + +class independent_linear_parameter_group(_parameter_group): + TYPE = ParameterType.independent_linear + + +@dataclass(frozen=True, eq=True) +class ParameterGroupId: + name: str + property_name: str = field(compare=False, hash=False) + type: ParameterType = field(compare=False, hash=False) + linked: bool = field(compare=False, hash=False) + count: int = field(compare=False, hash=False) + start_index: int = field(compare=False, hash=False) + stop_index: int = field(compare=False, hash=False) + index: Any = field(compare=False, hash=False) + instance_key: Any = field(compare=False, hash=False) + get_constraints: Any = field(compare=False, hash=False, repr=False) + + def parameter_names(self): + if self.count > 1: + for i in range(self.count): + yield self.name + str(i) + elif self.count == 1: + yield self.name + + def contains_parameter_index(self, param_idx): + return self.start_index <= param_idx < self.stop_index + + def parameter_index_in_group(self, param_idx): + if self.contains_parameter_index(param_idx): + return param_idx - self.start_index + return None + + @property + def is_linear(self): + return self.type != ParameterType.non_linear + + @property + def is_independent_linear(self): + return self.type == ParameterType.independent_linear + + +class ParameterModelBase(CachedPropertiesModel): + """Interface for all models that manage fit parameters""" + + @property + def parameter_types(self): + raise NotImplementedError + + @parameter_types.setter + def parameter_types(self, value): + raise NotImplementedError + + @property + def only_linear_parameters(self): + return not bool(self.parameter_types & ParameterType.non_linear) + + @contextmanager + def parameter_types_context(self, value=None): + keep = self.parameter_types + if value is not None: + self.parameter_types = value + try: + yield + finally: + self.parameter_types = keep + + def _property_cache_key(self, parameter_types=None, **paramtype): + if parameter_types is None: + parameter_types = self.parameter_types + elif not isinstance(parameter_types, ParameterType): + raise TypeError(parameter_types, "must be None or ParameterType") + return parameter_types + + def _create_empty_property_values_cache(self, key, **paramtype): + return numpy.zeros(self.get_n_parameters(**paramtype)) + + def 
_property_index_from_id(self, group, **cacheoptions): + return group.index + + def _property_name_from_id(self, group): + return group.property_name + + def get_parameter_names(self, **paramtype): + return tuple(self._iter_parameter_names(**paramtype)) + + def _iter_parameter_names(self, **paramtype): + for group in self._iter_parameter_groups(**paramtype): + yield from group.parameter_names() + + def get_n_parameters(self, **paramtype): + return sum(group.count for group in self._iter_parameter_groups(**paramtype)) + + def get_parameter_values(self, **paramtype): + return self._get_property_values(**paramtype) + + def set_parameter_values(self, values, **paramtype): + self._set_property_values(values, **paramtype) + + def get_parameter_constraints(self, **paramtype): + """ + :returns array: nparams x 3 + """ + return numpy.vstack( + tuple( + self._normalize_constraints(group.get_constraints()) + for group in self._iter_parameter_groups(**paramtype) + ) + ) + + @staticmethod + def _normalize_constraints(constraints): + constraints = numpy.atleast_1d(constraints) + if constraints.ndim not in (1, 2): + raise ValueError( + "Parameter group constraints must be of shape (3,) or (nparams, 3)" + ) + if constraints.shape[-1] != 3: + raise ValueError( + "Parameter group constraints must be of shape (3,) or (nparams, 3)" + ) + return constraints.tolist() + + def get_parameter_group_value(self, group, **paramtype): + return self._get_property_value(group, **paramtype) + + def set_parameter_group_value(self, group, value, **paramtype): + self._set_property_value(group, value, **paramtype) + + def get_parameter_groups(self, **paramtype): + return tuple(self._iter_parameter_groups(**paramtype)) + + def _property_id_from_name(self, property_name): + return self._group_from_parameter_name(property_name) + + def get_parameter_group_names(self, **paramtype): + return tuple(group.name for group in self._iter_parameter_groups(**paramtype)) + + def _iter_parameter_groups(self, **paramtype): + """This will only yield the groups with count > 0. 
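+        Groups whose counter returns zero are omitted entirely; use a
+        CFIXED constraint to keep a parameter in the vector without
+        optimizing it.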
+ + :yields ParameterGroupId: + """ + yield from self._iter_cached_property_ids(**paramtype) + + def _group_from_parameter_index(self, param_idx, **paramtype): + for group in self._iter_parameter_groups(**paramtype): + if group.start_index <= param_idx < group.stop_index: + return group + return None + + def _group_from_parameter_name(self, property_name, **paramtype): + for group in self._iter_parameter_groups(**paramtype): + if group.property_name == property_name: + return group + return None + + +class ParameterModel(CachedPropertiesLinkModel, ParameterModelBase): + """Model that implements fit parameters""" + + def __init__(self, *args, **kw): + super().__init__(*args, **kw) + self.__parameter_types = AllParameterTypes + + def _iter_cached_property_ids(self, **paramtype): + instance_key = self._linked_instance_to_key + for propid in super()._iter_cached_property_ids(**paramtype): + if propid.linked or propid.instance_key == instance_key: + yield propid + + def __iter_parameter_group_properties(self): + cls = type(self) + for property_name in self._cached_property_names(): + prop = getattr(cls, property_name) + if not isinstance(prop, _parameter_group): + raise TypeError( + "Currently only parameter _group properties support caching" + ) + yield property_name, prop + + def _instance_cached_property_ids( + self, parameter_types=None, linked=None, tracker=None + ): + """ + :param parameter_types ParameterType: only these parameter types + :param linked bool: linked parameters or unlinked parameters + :param tracker _IterGroupTracker: + :yields ParameterGroupId: + """ + if parameter_types is None: + parameter_types = self.parameter_types + elif not isinstance(parameter_types, ParameterType): + raise TypeError(parameter_types, "must be of type ParameterType") + count = None + index = None + if tracker is None: + start_index = 0 + else: + start_index = tracker.start_index + for property_name, prop in self.__iter_parameter_group_properties(): + group_is_linked = prop.propagate + if linked is not None: + if group_is_linked is not linked: + continue + + if not (parameter_types & prop.TYPE): + continue + + count = prop.fcount(self) + if not count: + continue + + stop_index = start_index + count + if count > 1: + index = slice(start_index, stop_index) + elif count == 1: + index = start_index + else: + index = None + + instance_key = self._linked_instance_to_key + if group_is_linked or instance_key is None: + name = property_name + else: + name = f"{instance_key}:{property_name}" + + group = ParameterGroupId( + name=name, + type=prop.TYPE, + linked=group_is_linked, + property_name=property_name, + instance_key=instance_key, + count=count, + start_index=start_index, + stop_index=stop_index, + index=index, + get_constraints=self.__constraints_getter(prop), + ) + if tracker is None: + yield group + start_index += count + elif tracker.is_new_group(group): + yield group + start_index = tracker.start_index + + def __constraints_getter(self, prop): + def get_constraints(): + return prop.fconstraints(self) + + return get_constraints + + @linked_property + def parameter_types(self): + return self.__parameter_types + + @parameter_types.setter + def parameter_types(self, value): + if not isinstance(value, ParameterType): + raise TypeError(value, "must be None or ParameterType") + self.__parameter_types = value + + def _get_noncached_property_value(self, group): + return getattr(self, group.property_name) + + def _set_noncached_property_value(self, group, value): + setattr(self, group.property_name, value) + 
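+
+
+# Minimal sketch of combining models (hypothetical ``GainModel``; parameter
+# groups are unlinked by default, so each instance contributes its own
+# ``gain`` until the link is enabled):
+#
+#     class GainModel(ParameterModel):
+#         def __init__(self):
+#             super().__init__()
+#             self._gain = 1.0
+#
+#         @nonlinear_parameter_group
+#         def gain(self):
+#             return self._gain
+#
+#         @gain.setter
+#         def gain(self, value):
+#             self._gain = value
+#
+#     manager = ParameterModelManager(
+#         linked_instances={"a": GainModel(), "b": GainModel()}
+#     )
+#     manager.get_parameter_names()          # ("a:gain", "b:gain")
+#     manager._enable_property_link("gain")
+#     manager.get_parameter_names()          # ("gain",) -- now shared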
+ +class _IterGroupTracker: + def __init__(self): + self._start_index = 0 + self._encountered = set() + + @property + def start_index(self): + return self._start_index + + def is_new_group(self, group): + if group in self._encountered: + return False + self._encountered.add(group) + self._start_index += group.count + return True + + +class ParameterModelManager(ParameterModelBase, LinkedModelManager): + """Model that manages linked models that implement fit parameters.""" + + def __init__(self, *args, **kw): + super().__init__(*args, **kw) + self._enable_property_link("parameter_types") + for model in self.models: + model._cache_manager = self + + @property + def models(self): + return self._linked_instances + + @property + def model_mapping(self): + return self._linked_instance_mapping + + @property + def nmodels(self): + return len(self.model_mapping) + + @property + def parameter_types(self): + return self._get_linked_property_value("parameter_types") + + @parameter_types.setter + def parameter_types(self, value): + self._set_linked_property_value("parameter_types", value) + + def _instance_cached_property_ids(self, **paramtype): + """ + :yields ParameterGroupId: + """ + # Shared parameters + tracker = _IterGroupTracker() + for model in self.models: + yield from model._instance_cached_property_ids( + linked=True, tracker=tracker, **paramtype + ) + # Non-shared parameters + for model in self.models: + yield from model._instance_cached_property_ids( + linked=False, tracker=tracker, **paramtype + ) + + def _get_noncached_property_value(self, group): + instance = self._linked_key_to_instance(group.instance_key) + return getattr(instance, group.property_name) + + def _set_noncached_property_value(self, group, value): + instance = self._linked_key_to_instance(group.instance_key) + setattr(instance, group.property_name, value) diff --git a/PyMca5/PyMcaMath/fitting/model/PolynomialModels.py b/PyMca5/PyMcaMath/fitting/model/PolynomialModels.py new file mode 100644 index 000000000..ad3202125 --- /dev/null +++ b/PyMca5/PyMcaMath/fitting/model/PolynomialModels.py @@ -0,0 +1,124 @@ +import numpy + +from .LeastSquaresFitModel import LeastSquaresFitModel +from .ParameterModel import independent_linear_parameter_group + + +class PolynomialModel(LeastSquaresFitModel): + def __init__(self, degree=0, maxiter=100): + self._xdata = None + self._ydata = None + self._mask = None + self.degree = degree + self.maxiter = maxiter + super().__init__() + + @property + def degree(self): + return self._coefficients.size - 1 + + @degree.setter + def degree(self, n): + if n < 0: + raise ValueError("degree must be a positive integer") + self._coefficients = numpy.zeros(n + 1) + + @property + def coefficients(self): + return self._coefficients + + @coefficients.setter + def coefficients(self, values): + self._coefficients[:] = values + + @property + def xdata(self): + return self._xdata + + @xdata.setter + def xdata(self, values): + self._xdata = values + + @property + def ydata(self): + return self._ydata + + @ydata.setter + def ydata(self, values): + self._ydata = values + + @property + def ystd(self): + return None + + @property + def maxiter(self): + return self._maxiter + + @maxiter.setter + def maxiter(self, value): + self._maxiter = value + + +class LinearPolynomialModel(PolynomialModel): + """y = c0 + c1*x + c2*x^2 + ...""" + + @independent_linear_parameter_group + def fitmodel_coefficients(self): + return self.coefficients + + @fitmodel_coefficients.setter + def fitmodel_coefficients(self, values): + 
self.coefficients = values
+
+    def evaluate_fitmodel(self, xdata=None):
+        """Evaluate the fit model, not the full model.
+
+        :param array xdata: shape (ndata,)
+        :returns array: shape (ndata,)
+        """
+        if xdata is None:
+            xdata = self.xdata
+        coeff = numpy.atleast_1d(self.fitmodel_coefficients)
+        y = coeff[0] * numpy.ones_like(xdata)
+        for i in range(1, len(coeff)):
+            y += coeff[i] * (xdata ** i)
+        return y
+
+    def derivative_fitmodel(self, param_idx, xdata=None, **paramtype):
+        """Derivative with respect to a specific parameter
+
+        :param int param_idx:
+        :param array xdata: shape (ndata,)
+        :returns array: shape (ndata,)
+        """
+        if xdata is None:
+            xdata = self.xdata
+        if param_idx == 0:
+            return numpy.ones_like(xdata)
+        else:
+            return xdata ** param_idx
+
+
+class ExponentialPolynomialModel(LinearPolynomialModel):
+    """y = c0 * exp[c1*x + c2*x^2 + ...]
+    yfit = log(y) = log(c0) + c1*x + c2*x^2 + ...
+    """
+
+    @independent_linear_parameter_group
+    def fitmodel_coefficients(self):
+        coefficients = self.coefficients.copy()
+        coefficients[0] = numpy.log(coefficients[0])
+        return coefficients
+
+    @fitmodel_coefficients.setter
+    def fitmodel_coefficients(self, values):
+        values = numpy.atleast_1d(values).copy()
+        values[0] = numpy.exp(values[0])
+        self.coefficients = values
+
+    def _y_full_to_fit(self, y, xdata=None):
+        return numpy.log(y)
+
+    def _y_fit_to_full(self, y, xdata=None):
+        return numpy.exp(y)
diff --git a/PyMca5/PyMcaMath/fitting/model/PropertyUtils.py b/PyMca5/PyMcaMath/fitting/model/PropertyUtils.py
new file mode 100644
index 000000000..00778c2de
--- /dev/null
+++ b/PyMca5/PyMcaMath/fitting/model/PropertyUtils.py
@@ -0,0 +1,97 @@
+class _wrapped_property(property):
+    """Property that prepares fget, fset and fdel wrappers
+    for derived property classes.
+    """
+
+    def __init__(
+        self,
+        fget=None,
+        fset=None,
+        fdel=None,
+        doc=None,
+    ) -> None:
+        if fget is not None:
+            fget = self._wrap_getter(fget)
+        if fset is not None:
+            fset = self._wrap_setter(fset)
+        if fdel is not None:
+            fdel = self._wrap_deleter(fdel)
+        super().__init__(
+            fget=fget,
+            fset=fset,
+            fdel=fdel,
+            doc=doc,
+        )
+
+    def getter(self, fget):
+        """Decorator to change fget after property instantiation"""
+        if fget is not None:
+            fget = self._wrap_getter(fget)
+        return super().getter(fget)
+
+    def setter(self, fset):
+        """Decorator to change fset after property instantiation"""
+        if fset is not None:
+            fset = self._wrap_setter(fset)
+        return super().setter(fset)
+
+    def deleter(self, fdel):
+        """Decorator to change fdel after property instantiation"""
+        if fdel is not None:
+            fdel = self._wrap_deleter(fdel)
+        return super().deleter(fdel)
+
+    def _wrap_getter(self, fget):
+        """Intended for derived property classes"""
+        return fget
+
+    def _wrap_setter(self, fset):
+        """Intended for derived property classes"""
+        return fset
+
+    def _wrap_deleter(self, fdel):
+        """Intended for derived property classes"""
+        return fdel
+
+
+class wrapped_property:
+    """Can be used like python's builtin property but with
+    getter and setter hooks for derived classes. 
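+
+    A minimal usage sketch (the ``Example`` class is hypothetical and
+    only illustrates the decorator protocol):
+
+    >>> class Example:
+    ...     def __init__(self):
+    ...         self._x = 0
+    ...     @wrapped_property
+    ...     def x(self):
+    ...         return self._x
+    ...     @x.setter
+    ...     def x(self, value):
+    ...         self._x = value
+    >>> obj = Example()
+    >>> obj.x = 3
+    >>> obj.x
+    3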
+ """ + + def __init__(self, fget): + self.fget = self._wrap_getter(fget) + self.fset = None + self.attrname = None + + def __set_name__(self, owner, name): + if self.attrname is None: + self.attrname = name + else: + raise TypeError(f"Cannot use the same {type(self).__name__} twice") + + def __get__(self, instance, owner=None): + if instance is None: + return self + return self.fget(instance) + + def __set__(self, instance, value): + if self.fset is None: + raise AttributeError( + f"{type(instance).__name__}.{self.attrname} has no setter" + ) + if instance is None: + return self + return self.fset(instance, value) + + def setter(self, fset): + self.fset = self._wrap_setter(fset) + return self + + def _wrap_getter(self, fget): + """Intended for derived property classes""" + return fget + + def _wrap_setter(self, fset): + """Intended for derived property classes""" + return fset diff --git a/PyMca5/PyMcaMath/fitting/model/__init__.py b/PyMca5/PyMcaMath/fitting/model/__init__.py new file mode 100644 index 000000000..1d76e063b --- /dev/null +++ b/PyMca5/PyMcaMath/fitting/model/__init__.py @@ -0,0 +1,12 @@ +"""Base classes for fit models and combined fit models +""" + +from PyMca5.PyMcaMath.fitting.model.ParameterModel import ( + nonlinear_parameter_group, + dependent_linear_parameter_group, + independent_linear_parameter_group, +) +from PyMca5.PyMcaMath.fitting.model.LeastSquaresFitModel import ( + LeastSquaresFitModel, + LeastSquaresCombinedFitModel, +) diff --git a/PyMca5/PyMcaMisc/ProfilingUtils.py b/PyMca5/PyMcaMisc/ProfilingUtils.py index 09ad1ff78..31d16a91f 100644 --- a/PyMca5/PyMcaMisc/ProfilingUtils.py +++ b/PyMca5/PyMcaMisc/ProfilingUtils.py @@ -1,4 +1,4 @@ -#/*########################################################################## +# /*########################################################################## # # The PyMca X-Ray Fluorescence Toolkit # @@ -30,6 +30,7 @@ __contact__ = "wout.de_nolf@esrf.eu" __license__ = "MIT" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" + try: import tracemalloc import linecache @@ -37,11 +38,11 @@ tracemalloc = None import cProfile import pstats + try: from StringIO import StringIO except ImportError: from io import StringIO -import os import logging from contextlib import contextmanager @@ -49,44 +50,82 @@ logger = logging.getLogger(__name__) -def print_malloc_snapshot(snapshot, key_type='lineno', limit=10, units='KB'): +class bcolors: + HEADER = "\033[95m" + OKBLUE = "\033[94m" + OKGREEN = "\033[92m" + WARNING = "\033[93m" + FAIL = "\033[91m" + ENDC = "\033[0m" + BOLD = "\033[1m" + UNDERLINE = "\033[4m" + + +def durationfmtcolor(x): + if x < 0.0005: + return bcolors.OKGREEN + ("%6dµs" % (x * 1000000)) + bcolors.ENDC + elif x < 0.001: + return bcolors.WARNING + ("%6dµs" % (x * 1000000)) + bcolors.ENDC + elif x < 0.1: + return bcolors.WARNING + ("%6dms" % (x * 1000)) + bcolors.ENDC + else: + return bcolors.FAIL + ("%8.3f" % x) + bcolors.ENDC + + +def durationfmt(x): + if x < 0.001: + return "%6dµs" % (x * 1000000) + elif x < 0.1: + return "%6dms" % (x * 1000) + else: + return "%8.3f" % x + + +def print_malloc_snapshot(snapshot, key_type="lineno", limit=10, units="KB"): """ :param tracemalloc.Snapshot snapshot: :param str key_type: :param int limit: limit number of lines :param str units: B, KB, MB, GB """ - n = ['b', 'kb', 'mb', 'gb'].index(units.lower()) - sunits, units = units, 1024**n + n = ["b", "kb", "mb", "gb"].index(units.lower()) + sunits, units = units, 1024 ** n - snapshot = snapshot.filter_traces(( - 
tracemalloc.Filter(False, ""), - tracemalloc.Filter(False, ""), - )) + snapshot = snapshot.filter_traces( + ( + tracemalloc.Filter(False, ""), + tracemalloc.Filter(False, ""), + ) + ) top_stats = snapshot.statistics(key_type) total = sum(stat.size for stat in top_stats) - print('================Memory profile================') + logger.info("================Memory profile================") + out = "" for index, stat in enumerate(top_stats, 1): frame = stat.traceback[0] # replace "/path/to/module/file.py" with "module/file.py" - #filename = os.sep.join(frame.filename.split(os.sep)[-2:]) + # filename = os.sep.join(frame.filename.split(os.sep)[-2:]) filename = frame.filename - print("#%s: %s:%s: %.1f %s" - % (index, filename, frame.lineno, stat.size / units, sunits)) + out += "\n#%s: %s:%s: %.1f %s" % ( + index, + filename, + frame.lineno, + stat.size / units, + sunits, + ) line = linecache.getline(frame.filename, frame.lineno).strip() if line: - print(' %s' % line) + out += "\n %s" % line if index >= limit: break - other = top_stats[index:] if other: size = sum(stat.size for stat in other) - print("%s other: %.1f %s" % (len(other), size / units, sunits)) - - print("Total allocated size: %.1f %s" % (total / units, sunits)) - print('============================================') + out += "\n%s other: %.1f %s" % (len(other), size / units, sunits) + out += "\nTotal allocated size: %.1f %s" % (total / units, sunits) + logger.info(out) + logger.info("============================================") @contextmanager @@ -95,42 +134,89 @@ def print_malloc_context(**kwargs): :param **kwargs: see print_malloc_snapshot """ if tracemalloc is None: - logger.error('tracemalloc required') + logger.info("tracemalloc required") return tracemalloc.start() - yield - snapshot = tracemalloc.take_snapshot() - print_malloc_snapshot(snapshot, **kwargs) + try: + yield + finally: + snapshot = tracemalloc.take_snapshot() + print_malloc_snapshot(snapshot, **kwargs) @contextmanager -def print_time_context(restrictions=None, sortby='cumtime'): +def print_time_context(timelimit=None, sortby="cumtime", color=False, filename=None): + """ + :param int or float timelimit: number of lines or fraction (float between 0 and 1) + :param str sortby: sort time profile + :param bool color: + :param str filename: + """ pr = cProfile.Profile() pr.enable() - yield - pr.disable() - s = StringIO() - ps = pstats.Stats(pr, stream=s).sort_stats(sortby) - if restrictions is None: - restrictions = (0.1,) - ps.print_stats(*restrictions) - print('================Time profile================') - print(s.getvalue()) - print('============================================') + try: + yield + finally: + pr.disable() + if isinstance(sortby, str): + sortby = [sortby] + if color: + pstats.f8 = durationfmtcolor + else: + pstats.f8 = durationfmt + for i, sortmethod in enumerate(sortby): + s = StringIO() + ps = pstats.Stats(pr, stream=s) + if sortmethod: + ps = ps.sort_stats(sortmethod) + if timelimit is None: + timelimit = (0.1,) + elif not isinstance(timelimit, tuple): + timelimit = (timelimit,) + ps.print_stats(*timelimit) + if filename and i == 0: + ps.dump_stats(filename) + logger.info("================Time profile================") + msg = "\n" + s.getvalue() + msg += "\n Saved as {}".format(repr(filename)) + logger.info(msg) + logger.info("============================================") @contextmanager -def profile(memory=True, time=True, memlimit=10, - restrictions=None, sortby='cumtime'): +def profile( + memory=True, + time=True, + memlimit=10, + 
timelimit=None,
+    sortby="cumtime",
+    color=False,
+    filename=None,
+    units="KB",
+):
+    """
+    :param bool memory: profile memory usage
+    :param bool time: profile execution time
+    :param int memlimit: number of lines
+    :param int or float timelimit: number of lines or fraction (float between 0 and 1)
+    :param str sortby: sort time profile
+    :param bool color: colorize the printed durations
+    :param str filename: dump for visual tools
+    :param str units: memory units
+    """
     if not memory and not time:
-        yield
+        yield  # a @contextmanager generator must yield exactly once
     elif memory and time:
-        with print_time_context(restrictions=restrictions, sortby=sortby):
-            with print_malloc_context(limit=memlimit):
+        with print_time_context(
+            timelimit=timelimit, sortby=sortby, color=color, filename=filename
+        ):
+            with print_malloc_context(limit=memlimit, units=units):
                 yield
     elif memory:
-        with print_malloc_context(limit=memlimit):
+        with print_malloc_context(limit=memlimit, units=units):
             yield
     else:
-        with print_time_context(restrictions=restrictions, sortby=sortby):
+        with print_time_context(
+            timelimit=timelimit, sortby=sortby, color=color, filename=filename
+        ):
             yield
diff --git a/PyMca5/PyMcaPhysics/xrf/FastXRFLinearFit.py b/PyMca5/PyMcaPhysics/xrf/FastXRFLinearFit.py
index e2e408111..9e6b2eb02 100644
--- a/PyMca5/PyMcaPhysics/xrf/FastXRFLinearFit.py
+++ b/PyMca5/PyMcaPhysics/xrf/FastXRFLinearFit.py
@@ -39,7 +39,7 @@
 import time
 import h5py
 import collections
-from . import ClassMcaTheory
+from . import LegacyMcaTheory
 from . import ConcentrationsTool
 from PyMca5.PyMcaMath.linalg import lstsq
 from PyMca5.PyMcaMath.fitting import Gefit
@@ -55,7 +55,7 @@ class FastXRFLinearFit(object):
     def __init__(self, mcafit=None):
         self._config = None
         if mcafit is None:
-            self._mcaTheory = ClassMcaTheory.McaTheory()
+            self._mcaTheory = LegacyMcaTheory.McaTheory()
         else:
             self._mcaTheory = mcafit
 
@@ -518,7 +518,7 @@ def _fitCreateModel(self, dtype=None):
         freeNames = []
         nFreeBkg = 0
         for iParam, param in enumerate(self._mcaTheory.PARAMETERS):
-            if self._mcaTheory.codes[0][iParam] != ClassMcaTheory.Gefit.CFIXED:
+            if self._mcaTheory.codes[0][iParam] != LegacyMcaTheory.Gefit.CFIXED:
                 nFree += 1
                 freeNames.append(param)
                 if iParam < self._mcaTheory.NGLOBAL:
@@ -532,7 +532,7 @@ def _fitCreateModel(self, dtype=None):
         derivatives = None
         idx = 0
         for iParam, param in enumerate(self._mcaTheory.PARAMETERS):
-            if self._mcaTheory.codes[0][iParam] == ClassMcaTheory.Gefit.CFIXED:
+            if self._mcaTheory.codes[0][iParam] == LegacyMcaTheory.Gefit.CFIXED:
                 continue
             deriv = self._mcaTheory.linearMcaTheoryDerivative(self._mcaTheory.parameters,
                                                               iParam,
diff --git a/PyMca5/PyMcaPhysics/xrf/LegacyFastXRFLinearFit.py b/PyMca5/PyMcaPhysics/xrf/LegacyFastXRFLinearFit.py
index 05621853f..a3c6fbf97 100644
--- a/PyMca5/PyMcaPhysics/xrf/LegacyFastXRFLinearFit.py
+++ b/PyMca5/PyMcaPhysics/xrf/LegacyFastXRFLinearFit.py
@@ -37,7 +37,7 @@
 import numpy
 import logging
 from PyMca5.PyMcaMath.linalg import lstsq
-from . import ClassMcaTheory
+from . import LegacyMcaTheory
 from PyMca5.PyMcaMath.fitting import Gefit
 from . 
import ConcentrationsTool from PyMca5.PyMcaMath.fitting import SpecfitFuns @@ -51,7 +51,7 @@ class FastXRFLinearFit(object): def __init__(self, mcafit=None): self._config = None if mcafit is None: - self._mcaTheory = ClassMcaTheory.McaTheory() + self._mcaTheory = LegacyMcaTheory.McaTheory() else: self._mcaTheory = mcafit @@ -261,7 +261,7 @@ def fitMultipleSpectra(self, x=None, y=None, xmin=None, xmax=None, freeNames = [] nFreeBackgroundParameters = 0 for i, param in enumerate(self._mcaTheory.PARAMETERS): - if self._mcaTheory.codes[0][i] != ClassMcaTheory.Gefit.CFIXED: + if self._mcaTheory.codes[0][i] != LegacyMcaTheory.Gefit.CFIXED: nFree += 1 freeNames.append(param) if i < self._mcaTheory.NGLOBAL: @@ -275,7 +275,7 @@ def fitMultipleSpectra(self, x=None, y=None, xmin=None, xmax=None, derivatives = None idx = 0 for i, param in enumerate(self._mcaTheory.PARAMETERS): - if self._mcaTheory.codes[0][i] == ClassMcaTheory.Gefit.CFIXED: + if self._mcaTheory.codes[0][i] == LegacyMcaTheory.Gefit.CFIXED: continue deriv= self._mcaTheory.linearMcaTheoryDerivative(self._mcaTheory.parameters, i, diff --git a/PyMca5/PyMcaPhysics/xrf/LegacyMcaAdvancedFitBatch.py b/PyMca5/PyMcaPhysics/xrf/LegacyMcaAdvancedFitBatch.py index 3233d9ba7..b67964ae9 100644 --- a/PyMca5/PyMcaPhysics/xrf/LegacyMcaAdvancedFitBatch.py +++ b/PyMca5/PyMcaPhysics/xrf/LegacyMcaAdvancedFitBatch.py @@ -33,7 +33,7 @@ import sys import os import numpy -from . import ClassMcaTheory +from . import LegacyMcaTheory from PyMca5.PyMcaCore import SpecFileLayer from PyMca5.PyMcaCore import EdfFileLayer from PyMca5.PyMcaIO import EdfFile @@ -71,13 +71,13 @@ def __init__(self,initdict,filelist=None,outputdir=None, self.fitFiles = fitfiles self._concentrations = concentrations if type(initdict) == type([]): - self.mcafit = ClassMcaTheory.McaTheory(initdict[mcaoffset]) + self.mcafit = LegacyMcaTheory.McaTheory(initdict[mcaoffset]) self.__configList = initdict self.__currentConfig = mcaoffset else: self.__configList = [initdict] self.__currentConfig = 0 - self.mcafit = ClassMcaTheory.McaTheory(initdict) + self.mcafit = LegacyMcaTheory.McaTheory(initdict) self.__concentrationsKeys = [] if self._concentrations: self._tool = ConcentrationsTool.ConcentrationsTool() @@ -172,7 +172,7 @@ def processList(self): if not self.roiFit: if len(self.__configList) > 1: if i != 0: - self.mcafit = ClassMcaTheory.McaTheory(self.__configList[i]) + self.mcafit = LegacyMcaTheory.McaTheory(self.__configList[i]) self.__currentConfig = i self.mcafit.enableOptimizedLinearFit() @@ -574,7 +574,7 @@ def __processOneMca(self,x,y,filename,key,info=None): if self.mcafit.config['fit'].get("strategyflag", False): config = self.__configList[self.__currentConfig] print("Restoring fitconfiguration") - self.mcafit = ClassMcaTheory.McaTheory(config) + self.mcafit = LegacyMcaTheory.McaTheory(config) self.mcafit.enableOptimizedLinearFit() return try: @@ -614,7 +614,7 @@ def __processOneMca(self,x,y,filename,key,info=None): if self.mcafit.config['fit'].get("strategyflag", False): config = self.__configList[self.__currentConfig] print("Restoring fitconfiguration") - self.mcafit = ClassMcaTheory.McaTheory(config) + self.mcafit = LegacyMcaTheory.McaTheory(config) self.mcafit.enableOptimizedLinearFit() return if self._concentrations: diff --git a/PyMca5/PyMcaPhysics/xrf/ClassMcaTheory.py b/PyMca5/PyMcaPhysics/xrf/LegacyMcaTheory.py similarity index 99% rename from PyMca5/PyMcaPhysics/xrf/ClassMcaTheory.py rename to PyMca5/PyMcaPhysics/xrf/LegacyMcaTheory.py index 616d25612..4294bd7e2 100644 --- 
a/PyMca5/PyMcaPhysics/xrf/ClassMcaTheory.py +++ b/PyMca5/PyMcaPhysics/xrf/LegacyMcaTheory.py @@ -46,7 +46,7 @@ from PyMca5.PyMcaMath.fitting import Gefit from PyMca5 import PyMcaDataDir _logger = logging.getLogger(__name__) -#"python ClassMcaTheory.py -s1.1 --file=03novs060sum.mca --pkm=McaTheory.dat --continuum=0 --strip=1 --sumflag=1 --maxiter=4" +#"python LegacyMcaTheory.py -s1.1 --file=03novs060sum.mca --pkm=McaTheory.dat --continuum=0 --strip=1 --sumflag=1 --maxiter=4" CONTINUUM_LIST = [None,'Constant','Linear','Parabolic','Linear Polynomial','Exp. Polynomial'] OLDESCAPE = 0 MAX_ATTENUATION = 1.0E-300 @@ -1034,7 +1034,7 @@ def __configure(self): self.laststrip = 0 def setdata(self, *var, **kw): - print("ClassMcaTheory.setdata deprecated, please use setData") + print("LegacyMcaTheory.setdata deprecated, please use setData") return self.setData(*var, **kw) def setData(self,*var,**kw): @@ -2847,7 +2847,7 @@ def estimateexppol(self,x,y,z,xscaling=1.0,yscaling=1.0): return fittedpar,numpy.zeros((3,len(fittedpar)),numpy.float64) -class ClassMcaTheory(McaTheory): +class LegacyMcaTheory(McaTheory): pass """ @@ -2895,8 +2895,8 @@ def test(inputfile=None,scankey=None,pkm=None, if inputfile is None: print("USAGE") - print("python -m PyMca5.PyMcaPhysics.xrf.ClassMcaTheory.py -s1.1 --file=filename --cfg=cfgfile [--plotflag=1]") - #python ClassMcaTheory.py -s2.1 --file=ch09__mca_0005.mca --pkm=TEST.cfg --continuum=0 --stripflag=1 --sumflag=1 --maxiter=4 + print("python -m PyMca5.PyMcaPhysics.xrf.LegacyMcaTheory.py -s1.1 --file=filename --cfg=cfgfile [--plotflag=1]") + #python LegacyMcaTheory.py -s2.1 --file=ch09__mca_0005.mca --pkm=TEST.cfg --continuum=0 --stripflag=1 --sumflag=1 --maxiter=4 sys.exit(0) print("assuming is a specfile ...") sf=specfile.Specfile(inputfile) diff --git a/PyMca5/PyMcaPhysics/xrf/McaAdvancedFitBatch.py b/PyMca5/PyMcaPhysics/xrf/McaAdvancedFitBatch.py index 51b5f801d..c76e6e9f1 100644 --- a/PyMca5/PyMcaPhysics/xrf/McaAdvancedFitBatch.py +++ b/PyMca5/PyMcaPhysics/xrf/McaAdvancedFitBatch.py @@ -35,7 +35,7 @@ import time import logging import numpy -from . import ClassMcaTheory +from . import LegacyMcaTheory from PyMca5.PyMcaCore import SpecFileLayer from PyMca5.PyMcaCore import EdfFileLayer from PyMca5.PyMcaIO import EdfFile @@ -159,13 +159,13 @@ def __init__(self, initdict, filelist=None, outputdir=None, self.chunk = chunk if isinstance(initdict, list): - self.mcafit = ClassMcaTheory.McaTheory(initdict[mcaoffset]) + self.mcafit = LegacyMcaTheory.McaTheory(initdict[mcaoffset]) self.__configList = initdict self.__currentConfig = mcaoffset else: self.__configList = [initdict] self.__currentConfig = 0 - self.mcafit = ClassMcaTheory.McaTheory(initdict) + self.mcafit = LegacyMcaTheory.McaTheory(initdict) self.__concentrationsKeys = [] if self._concentrations: self._tool = ConcentrationsTool.ConcentrationsTool() @@ -272,7 +272,7 @@ def _processList(self): if not self.roiFit: if len(self.__configList) > 1: if i != 0: - self.mcafit = ClassMcaTheory.McaTheory(self.__configList[i]) + self.mcafit = LegacyMcaTheory.McaTheory(self.__configList[i]) self.__currentConfig = i # TODO: outbuffer does not support multiple configurations # Only the first one is saved. 
@@ -760,7 +760,7 @@ def _restoreFitConfig(self, filename, task): if self.mcafit.config['fit'].get("strategyflag", False): config = self.__configList[self.__currentConfig] _logger.info("Restoring fitconfiguration") - self.mcafit = ClassMcaTheory.McaTheory(config) + self.mcafit = LegacyMcaTheory.McaTheory(config) self.mcafit.enableOptimizedLinearFit() # TODO: why??? def _fitMca(self, filename): diff --git a/PyMca5/PyMcaPhysics/xrf/NewClassMcaTheory.py b/PyMca5/PyMcaPhysics/xrf/NewClassMcaTheory.py new file mode 100644 index 000000000..b903ade14 --- /dev/null +++ b/PyMca5/PyMcaPhysics/xrf/NewClassMcaTheory.py @@ -0,0 +1,2439 @@ +import os +import sys +import copy +import logging +import warnings +from contextlib import contextmanager +import numpy + +from PyMca5 import PyMcaDataDir +from PyMca5.PyMcaIO import ConfigDict +from PyMca5.PyMcaMath.fitting import SpecfitFuns +from PyMca5.PyMcaMath.fitting import Gefit +from PyMca5.PyMcaMath.fitting.model import nonlinear_parameter_group +from PyMca5.PyMcaMath.fitting.model import independent_linear_parameter_group +from PyMca5.PyMcaMath.fitting.model import dependent_linear_parameter_group +from PyMca5.PyMcaMath.fitting.model import LeastSquaresFitModel +from PyMca5.PyMcaMath.fitting.model import LeastSquaresCombinedFitModel +from PyMca5.PyMcaMath.fitting.model.PolynomialModels import LinearPolynomialModel +from PyMca5.PyMcaMath.fitting.model.PolynomialModels import ExponentialPolynomialModel +from PyMca5.PyMcaMath.fitting.model.LinkedModel import linked_property +from PyMca5.PyMcaMath.fitting.model.ParameterModel import ParameterType +from PyMca5.PyMcaMath.fitting.model.ParameterModel import AllParameterTypes + +from . import Elements +from . import ConcentrationsTool + +FISX = ConcentrationsTool.FISX +if FISX: + FisxHelper = ConcentrationsTool.FisxHelper + + +_logger = logging.getLogger(__name__) + + +def defaultConfigFilename(): + dirname = PyMcaDataDir.PYMCA_DATA_DIR + filename = os.path.join(dirname, "McaTheory.cfg") + if not os.path.exists(filename): + # Frozen version deals differently with the path + dirname = os.path.dirname(dirname) + filename = os.path.join(dirname, "McaTheory.cfg") + if not os.path.exists(filename): + if dirname.lower().endswith(".zip"): + dirname = os.path.dirname(dirname) + filename = os.path.join(dirname, "McaTheory.cfg") + if os.path.exists(filename): + return filename + else: + print("Cannot find file McaTheory.cfg") + raise IOError("File %s does not exist" % filename) + + +class McaTheoryConfigApi: + """API on top of a ConfigDict for MCA configuration""" + + def __init__(self, initdict=None, filelist=None, **kw): + if initdict is None: + initdict = defaultConfigFilename() + if os.path.exists(initdict.split("::")[0]): + self.config = ConfigDict.ConfigDict(filelist=initdict) + else: + raise IOError("File %s does not exist" % initdict) + self._overwriteConfig(**kw) + self.attflag = kw.get("attenuatorsflag", 1) + self.configure() + super().__init__() + + def _overwriteConfig(self, **kw): + if "config" in kw: + self.config.update(kw["config"]) + cfgfit = self.config["fit"] + cfgfit["sumflag"] = kw.get("sumflag", cfgfit["sumflag"]) + cfgfit["escapeflag"] = kw.get("escapeflag", cfgfit["escapeflag"]) + cfgfit["continuum"] = kw.get("continuum", cfgfit["continuum"]) + cfgfit["stripflag"] = kw.get("stripflag", cfgfit["stripflag"]) + cfgfit["maxiter"] = kw.get("maxiter", cfgfit["maxiter"]) + cfgfit["hypermetflag"] = kw.get("hypermetflag", cfgfit["hypermetflag"]) + + def _addMissingConfig(self): + """Add missing information to 
the configuration"""
+        cfgroot = self.config
+
+        cfgroot["userattenuators"] = cfgroot.get("userattenuators", {})
+        cfgroot["multilayer"] = cfgroot.get("multilayer", {})
+        cfgroot["materials"] = cfgroot.get("materials", {})
+        cfgroot["concentrations"] = cfgroot.get("concentrations", {})
+
+        cfgpeakshape = cfgroot["peakshape"]
+        cfgpeakshape["eta_factor"] = cfgpeakshape.get("eta_factor", 0.02)
+        cfgpeakshape["fixedeta_factor"] = cfgpeakshape.get("fixedeta_factor", 0)
+        cfgpeakshape["deltaeta_factor"] = cfgpeakshape.get(
+            "deltaeta_factor", cfgpeakshape["eta_factor"]
+        )
+
+        cfgfit = cfgroot["fit"]
+        cfgfit["fitfunction"] = cfgfit.get("fitfunction", None)
+        if cfgfit["fitfunction"] is None:
+            if cfgfit["hypermetflag"]:
+                cfgfit["fitfunction"] = 0
+            else:
+                cfgfit["fitfunction"] = 1
+        cfgfit["linearfitflag"] = cfgfit.get("linearfitflag", 0)
+        cfgfit["fitweight"] = cfgfit.get("fitweight", 1)
+        cfgfit["deltaonepeak"] = cfgfit.get("deltaonepeak", 0.010)
+
+        cfgfit["energy"], idx = self._normalizeEnergyListParam(
+            cfgfit.get("energy", None)
+        )
+        cfgfit["energyweight"], _ = self._normalizeEnergyListParam(
+            cfgfit.get("energyweight"), idx=idx, default=1.0
+        )
+        cfgfit["energyflag"], _ = self._normalizeEnergyListParam(
+            cfgfit.get("energyflag"), idx=idx, default=1
+        )
+        cfgfit["energyscatter"], _ = self._normalizeEnergyListParam(
+            cfgfit.get("energyscatter"), idx=idx, default=1
+        )
+        cfgfit["scatterflag"] = cfgfit.get("scatterflag", 0)
+
+        cfgfit["stripalgorithm"] = cfgfit.get("stripalgorithm", 0)
+        cfgfit["snipwidth"] = cfgfit.get("snipwidth", 30)
+        cfgfit["linpolorder"] = cfgfit.get("linpolorder", 6)
+        cfgfit["exppolorder"] = cfgfit.get("exppolorder", 6)
+        cfgfit["stripconstant"] = cfgfit.get("stripconstant", 1.0)
+        cfgfit["stripwidth"] = int(cfgfit.get("stripwidth", 1))
+        cfgfit["stripfilterwidth"] = int(cfgfit.get("stripfilterwidth", 5))
+        cfgfit["stripiterations"] = int(cfgfit.get("stripiterations", 20000))
+        cfgfit["stripanchorsflag"] = int(cfgfit.get("stripanchorsflag", 0))
+        cfgfit["stripanchorslist"] = cfgfit.get("stripanchorslist", [0, 0, 0, 0])
+
+        cfgdetector = cfgroot["detector"]
+        cfgdetector["detene"] = cfgdetector.get("detene", 1.7420)
+        cfgdetector["ethreshold"] = cfgdetector.get("ethreshold", 0.020)
+        cfgdetector["nthreshold"] = cfgdetector.get("nthreshold", 4)
+        cfgdetector["ithreshold"] = cfgdetector.get("ithreshold", 1.0e-07)
+
+    def _normalizeEnergyListParam(self, lst, idx=None, default=0):
+        if not isinstance(lst, list):
+            lst = [lst]
+        if idx is None:
+            idx = [
+                i for i, v in enumerate(lst) if v and not isinstance(v, (str, bytes))
+            ]
+        n = len(lst)
+        lst = [lst[i] if i < n else default for i in idx]
+        return lst, idx
+
+    def _sourceLines(self):
+        """
+        :yields tuple: (energy, weight, scatter)
+        """
+        cfg = self.config["fit"]
+        scatterflag = cfg["scatterflag"]
+        for energy, flag, weight, scatter in zip(
+            cfg["energy"],
+            cfg["energyflag"],
+            cfg["energyweight"],
+            cfg["energyscatter"],
+        ):
+            if energy and flag:
+                yield energy, weight, scatter and scatterflag
+
+    def _scatterLines(self):
+        """Source lines that are included in the fit model
+
+        :yields tuple: (energy, weight)
+        """
+        for energy, weight, scatter in self._sourceLines():
+            if scatter and energy > self.SCATTER_ENERGY_THRESHOLD:
+                yield energy, weight
+
+    @property
+    def _maxEnergy(self):
+        """
+        :returns float or None:
+        """
+        energies = [energy for energy, _, _ in self._sourceLines()]
+        if energies:
+            return max(energies)
+        else:
+            return None
+
+    @property
+    def _nSourceLines(self):
+        """
+        :returns int: 
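+            the number of enabled source lines, i.e. entries with both a
+            nonzero energy and an enabled flag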
+ """ + return len(list(self._sourceLines())) + + @property + def _nRayleighLines(self): + """ + :returns int: + """ + return len(list(self._scatterLines())) + + def _attenuators( + self, + matrix=False, + detector=False, + detectorfilters=False, + beamfilters=False, + detectorfunnyfilters=False, + yielddisabled=False, + ): + """Does not yield anything by default + + :yields list: + """ + cfg = self.config["attenuators"] + for name, alist in cfg.items(): + if not alist[0] and not yielddisabled: + continue + name = name.upper() + if name == "MATRIX": + # Sample description + # enable, formula, density, thickness, anglein, angleout, usescatteringangle, scatteringangle + if matrix: + if len(alist) == 6: + alist += [0, alist[4] + alist[5]] + yield alist[1:] + elif name == "DETECTOR": + # Detector description + # enable, formula, density, thickness + if detector: + yield alist[1:] + else: + # Filter + # enable, formula, density, thickness, funny + if len(alist) == 4: + alist.append(1.0) + if name.startswith("BEAMFILTER"): + if beamfilters: + yield alist[1:] + elif abs(alist[4] - 1.0) > 1.0e-10: + if detectorfunnyfilters: + yield alist[1:] + else: + if detectorfilters: + yield alist[1:] + + @property + def _matrix(self): + """ + :returns list or None: [formula, density, thickness, anglein, angleout, usescatteringangle, scatteringangle] + """ + if not self.attflag: + return + lst = list(self._attenuators(matrix=True)) + if lst: + return lst[0] + + @property + def _angleIn(self): + """Angle between sample surface and primary beam""" + lst = list(self._attenuators(matrix=True, yielddisabled=True)) + if lst: + return lst[0][3] + else: + _logger.warning("Sample incident angle set to 45 deg.") + return 45.0 + + @property + def _angleOut(self): + """Angle between sample surface and outgoing beam (emission or scattering)""" + lst = list(self._attenuators(matrix=True, yielddisabled=True)) + if lst: + return lst[0][4] + else: + _logger.warning("Sample incident angle set to 45 deg.") + return 45.0 + + @property + def _scatteringAngle(self): + """Angle between primary beam and outgoing beam (emission or scattering)""" + lst = list(self._attenuators(matrix=True, yielddisabled=True)) + if lst: + if lst[0][5]: + return lst[0][6] + else: + return self._angleIn + self._angleOut + else: + _logger.warning("Scattering angle set to 90 deg.") + return 90.0 + + @property + def _detector(self): + """ + :returns list or None: [formula, density, thickness] + """ + if not self.attflag: + return + lst = list(self._attenuators(detector=True)) + if lst: + return lst[0] + + def _multilayer(self): + """Yields the sample layers + + :yields list: [formula, density, thickness] + """ + if not self.attflag: + return + matrix = self._matrix + if matrix[0].upper() == "MULTILAYER": + cfg = self.config["multilayer"] + layerkeys = list(cfg.keys()) + layerkeys.sort() + for layer in layerkeys: + alist = cfg[layer] + if alist[0]: + yield alist[1:] + else: + yield matrix + + def _userAttenuators(self): + """ + :yields dict: {"energy": [], "transmission": []} + """ + if not self.attflag: + return + cfg = self.config["userattenuators"] + for tableDict in cfg.values(): + if tableDict["use"]: + yield tableDict + + def _emissionGroups(self): + """ + :yields list: [Z, symb, linegroupname] + """ + cfg = self.config["peaks"] + for element, peaks in cfg.items(): + symb = element.capitalize() + Z = Elements.getz(symb) + if isinstance(peaks, list): + for peak in peaks: + yield [Z, symb, peak] + else: + yield [Z, symb, peaks] + + def 
_configureElementsModule(self):
+        """Configure the globals in the Elements module"""
+        for material, info in self.config["materials"].items():
+            Elements.Material[material] = copy.deepcopy(info)
+        maxenergy = self._maxEnergy
+        for element in self.config["peaks"]:
+            symb = element.capitalize()
+            if maxenergy != Elements.Element[symb]["buildparameters"]["energy"]:
+                Elements.updateDict(energy=maxenergy)
+                break
+
+    def configure(self, newdict=None):
+        if newdict:
+            self.config.update(newdict)
+        self._initializeConfig()
+        return copy.deepcopy(self.config)
+
+    def _initializeConfig(self):
+        self._addMissingConfig()
+        self._configureElementsModule()
+
+    @property
+    def _hypermet(self):
+        if self.config["fit"]["fitfunction"] == 0:
+            return self.config["fit"]["hypermetflag"]
+        else:
+            return 0
+
+    @property
+    def _hypermetGaussian(self):
+        return self._hypermet & 1  # bit 0: gaussian term
+
+    @property
+    def _hypermetShortTail(self):
+        return (self._hypermet >> 1) & 1  # bit 1: short tail term
+
+    @property
+    def _hypermetLongTail(self):
+        return (self._hypermet >> 2) & 1  # bit 2: long tail term
+
+    @property
+    def _hypermetStep(self):
+        return (self._hypermet >> 3) & 1  # bit 3: step term
+
+    def _anchorsIndices(self):
+        cfg = self.config["fit"]
+        if not cfg["stripanchorsflag"] or not cfg["stripanchorslist"]:
+            return
+        ravelled = self.xdata
+        for channel in cfg["stripanchorslist"]:
+            if channel <= ravelled[0]:
+                continue
+            index = numpy.nonzero(ravelled >= channel)[0]
+            if len(index):
+                index = min(index)
+                if index > 0:
+                    yield index
+
+
+class McaTheoryLegacyApi:
+    def setdata(self, *args, **kw):
+        warnings.warn("McaTheory.setdata deprecated, please use setData", FutureWarning)
+        return self.setData(*args, **kw)
+
+    @property
+    def sigmay(self):
+        return self.ystd
+
+    @property
+    def sigmay0(self):
+        return self.ystd0
+
+    def startfit(self, *args, **kw):
+        warnings.warn(
+            "McaTheory.startfit deprecated, please use startFit", FutureWarning
+        )
+        return self.startFit(*args, **kw)
+
+    def setConfiguration(self, ddict):
+        """
+        The current fit configuration dictionary is updated, but not replaced,
+        by the input dictionary.
+        It returns a copy of the final fit configuration. 
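+
+        A short usage sketch (``theory`` stands for any McaTheory
+        instance)::
+
+            cfg = theory.getConfiguration()
+            cfg["fit"]["maxiter"] = 20
+            cfg = theory.setConfiguration(cfg)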
+ """ + return self.configure(ddict) + + def getConfiguration(self): + """ + returns a copy of the current fit configuration parameters + """ + return self.configure() + + def getStartingConfiguration(self): + """ + returns a copy of the current fit configuration parameters + """ + return self.configure() + + def enableOptimizedLinearFit(self): + warnings.warn( + "McaTheory.enableOptimizedLinearFit is deprecated (does nothing)", + FutureWarning, + ) + + def disableOptimizedLinearFit(self): + warnings.warn( + "McaTheory.enableOptimizedLinearFit is deprecated (does nothing)", + FutureWarning, + ) + + @property + def codes(self): + return self.get_parameter_constraints() + + def specfitestimate(self, x, y, z, xscaling=1.0, yscaling=1.0): + warnings.warn( + "McaTheory.specfitestimate is deprecated (does nothing)", FutureWarning + ) + + +class McaTheoryDataApi(McaTheoryConfigApi): + """Add API for handling a single XRF spectrum (MCA data)""" + + def __init__(self, **kw): + # Original XRF spectrum + self._ydata0 = None + self._xdata0 = None + self._std0 = None + self._xmin0 = None + self._xmax0 = None + self._expotime0 = None + + # XRF spectrum to fit + self._ydata = None + self._xdata = None + self._std = None + self._lastDataCacheParams = None + + super().__init__(**kw) + + @property + def xdata(self): + """Sorted and sliced view of xdata0""" + self._refreshDataCache() + return self._xdata + + @property + def ydata(self): + """Sorted and sliced view of ydata0""" + self._refreshDataCache() + return self._ydata + + @property + def ystd(self): + """Sorted and sliced view of ystd0""" + self._refreshDataCache() + return self._ystd + + @property + def xdata0(self): + return self._xdata0 + + @property + def ydata0(self): + return self._ydata0 + + @property + def ystd0(self): + return self._ystd0 + + def setData(self, *var, **kw): + """ + Method to update the data to be fitted. + It accepts several combinations of input arguments, the simplest to + take into account is: + + setData(x, y, sigmay=None, xmin=None, xmax=None) + + x corresponds to the spectrum channels + y corresponds to the spectrum counts + sigmay is the uncertainty associated to the counts. If not given, + Poisson statistics will be assumed. If the fit configuration + is set to no weight, it will not be used. + xmin and xmax define the limits to be considered for performing the fit. + If the fit configuration flag self.config['fit']['use_limit'] is + set, they will be ignored. If xmin and xmax are not given, the + whole given spectrum will be considered for fitting. 
time (seconds) is the factor associated with the flux, only used when using
+        a strategy based on concentrations
+        """
+        if "y" in kw:
+            ydata0 = kw["y"]
+        elif len(var) > 1:
+            ydata0 = var[1]
+        elif len(var) == 1:
+            ydata0 = var[0]
+        else:
+            ydata0 = None
+
+        if ydata0 is None:
+            return 1
+        else:
+            ydata0 = numpy.ravel(ydata0)
+
+        if "x" in kw:
+            xdata0 = kw["x"]
+        elif len(var) > 1:
+            xdata0 = var[0]
+        else:
+            xdata0 = None
+
+        if xdata0 is None:
+            xdata0 = numpy.arange(len(ydata0))
+        else:
+            xdata0 = numpy.ravel(xdata0)
+
+        if "sigmay" in kw:
+            ystd0 = kw["sigmay"]
+        elif "stdy" in kw:
+            ystd0 = kw["stdy"]
+        elif len(var) > 2:
+            ystd0 = var[2]
+        else:
+            ystd0 = None
+
+        if ystd0 is None:
+            # Poisson noise
+            valid = ydata0 > 0
+            if valid.any():
+                ystd0 = numpy.sqrt(abs(ydata0))
+                ystd0[~valid] = ystd0[valid].min()
+            else:
+                ystd0 = numpy.ones_like(ydata0)
+        else:
+            ystd0 = numpy.ravel(ystd0)
+
+        timeFactor = kw.get("time", None)
+        self._expotime0 = timeFactor
+        if timeFactor is None:
+            if self.config["concentrations"].get("useautotime", False):
+                if not self.config["concentrations"]["usematrix"]:
+                    msg = "Requested to use time from data but not present!!"
+                    raise ValueError(msg)
+        elif self.config["concentrations"].get("useautotime", False):
+            self.config["concentrations"]["time"] = timeFactor
+
+        self._xmin0 = kw.get("xmin", self.xmin)
+        self._xmax0 = kw.get("xmax", self.xmax)
+        return self._refreshDataCache(xdata0=xdata0, ydata0=ydata0, ystd0=ystd0)
+
+    @property
+    def xmin(self):
+        """From config (if enabled) or the last `setData` call"""
+        cfgfit = self.config["fit"]
+        if cfgfit["use_limit"]:
+            return cfgfit["xmin"]
+        else:
+            return self._xmin0
+
+    @property
+    def xmax(self):
+        """From config (if enabled) or the last `setData` call"""
+        cfgfit = self.config["fit"]
+        if cfgfit["use_limit"]:
+            return cfgfit["xmax"]
+        else:
+            return self._xmax0
+
+    def getLastTime(self):
+        return self._expotime0
+
+    @property
+    def _dataCacheParams(self):
+        """Any change in these parameters will invalidate the cache"""
+        return self.xmin, self.xmax
+
+    def _refreshDataCache(self, xdata0=None, ydata0=None, ystd0=None):
+        """Cache sorted and sliced view of the original XRF spectrum data"""
+        params = self._dataCacheParams
+        if xdata0 is None and ydata0 is None and ystd0 is None:
+            if self._lastDataCacheParams == params:
+                return  # the cached data is still valid
+
+        # Original XRF spectrum
+        if xdata0 is None:
+            xdata0 = self.xdata0
+        if xdata0 is None or not xdata0.size:
+            return 1
+        if ydata0 is None:
+            ydata0 = self.ydata0
+        if ydata0 is None or ydata0.size != xdata0.size:
+            return 1
+        if ystd0 is None:
+            ystd0 = self.ystd0
+        if ystd0 is None or ystd0.size != xdata0.size:
+            return 1
+
+        # XRF spectrum view
+        selection = numpy.isfinite(ydata0)
+        xmin = self.xmin
+        if xmin is not None:
+            selection &= xdata0 >= xmin
+        xmax = self.xmax
+        if xmax is not None:
+            selection &= xdata0 <= xmax
+        if not selection.any():
+            return 1
+
+        # Cache the original XRF spectrum and its view
+        idx = numpy.argsort(xdata0)
+        idx = idx[selection[idx]]  # sorted indices restricted to the selection
+        self._xdata = xdata0[idx]
+        self._ydata = ydata0[idx]
+        self._ystd = ystd0[idx]
+        self._xdata0 = xdata0
+        self._ydata0 = ydata0
+        self._ystd0 = ystd0
+        self._lastDataCacheParams = params
+
+
+class McaTheoryBackground(McaTheoryDataApi):
+    """Model for the background of an XRF spectrum"""
+
+    CONTINUUM_LIST = [
+        None,
+        "Constant",
+        "Linear",
+        "Parabolic",
+        "Linear Polynomial",
+        "Exp. 
Polynomial",
+    ]
+
+    def __init__(self, **kw):
+        # Numerical background
+        self._numBkg = None
+        self._lastNumBkgCacheParams = None
+
+        # Analytical background
+        self._continuum = None
+        self._continuumModel = None
+        self._lastContinuumCacheParams = None
+
+        super().__init__(**kw)
+
+    @property
+    def hasNumBkg(self):
+        return self._ynumbkg is not None
+
+    def ynumbkg(self, xdata=None):
+        """Get the numerical background (as opposed to the analytical background)"""
+        ybkg = self._ynumbkg
+        if ybkg is None:
+            if xdata is None:
+                return numpy.zeros(self.ndata)
+            else:
+                return numpy.zeros(len(xdata))
+        if xdata is not None:
+            # The numerical background is calculated on self.xdata
+            # so we need to interpolate on xdata
+            try:
+                binterp = not numpy.allclose(xdata, self.xdata)
+            except ValueError:
+                binterp = True  # different shapes: interpolation is required
+            if binterp:
+                ybkg = numpy.interp(xdata, self.xdata, ybkg)
+        return ybkg
+
+    @property
+    def hasContinuum(self):
+        return self.continuumModel is not None
+
+    def ycontinuum(self, xdata=None):
+        """Get the analytical background (as opposed to the numerical background)"""
+        model = self.continuumModel
+        if model is None:
+            if xdata is None:
+                return numpy.zeros(self.ndata)
+            else:
+                return numpy.zeros(len(xdata))
+        if xdata is not None:
+            xdata = self._channelsToXpol(xdata)
+        return model.evaluate_fullmodel(xdata=xdata)
+
+    @property
+    def hasBackground(self):
+        return self.hasNumBkg or self.hasContinuum
+
+    def ybackground(self, xdata=None):
+        """Get the total background"""
+        return self.ycontinuum(xdata=xdata) + self.ynumbkg(xdata=xdata)
+
+    @property
+    def _ynumbkg(self):
+        self._refreshNumBkgCache()
+        return self._numBkg
+
+    @property
+    def continuumModel(self):
+        self._refreshContinuumCache()
+        return self._continuumModel
+
+    @property
+    def _numBkgCacheParams(self):
+        """Any change in these parameters will invalidate the cache"""
+        cfg = self.config["fit"]
+        params = [
+            "stripflag",
+            "stripalgorithm",
+            "stripfilterwidth",
+            "stripanchorsflag",
+            "stripanchorslist",
+        ]
+        if cfg["stripalgorithm"] == 1:
+            params += ["snipwidth"]
+        else:
+            params += ["stripwidth", "stripconstant", "stripiterations"]
+        params = [cfg[p] for p in params]
+        params.append(id(self._lastDataCacheParams))
+        return params
+
+    def _refreshNumBkgCache(self):
+        """Cache numerical background"""
+        bkgparams = self._numBkgCacheParams
+        if self._lastNumBkgCacheParams == bkgparams:
+            return  # the cached data is still valid
+        elif self.ydata is None:
+            self._numBkg = None
+        elif self.config["fit"]["stripflag"]:
+            signal = self._smooth(self.ydata)
+            anchorslist = list(self._anchorsIndices())
+            if self.config["fit"]["stripalgorithm"] == 1:
+                self._numBkg = self._snip(signal, anchorslist)
+            else:
+                self._numBkg = self._strip(signal, anchorslist)
+        else:
+            self._numBkg = numpy.zeros_like(self.ydata)
+        self._lastNumBkgCacheParams = bkgparams
+
+    @property
+    def continuum(self):
+        return self.config["fit"]["continuum"]
+
+    @continuum.setter
+    def continuum(self, value):
+        self.config["fit"]["continuum_name"] = self.CONTINUUM_LIST[value]
+        self.config["fit"]["continuum"] = value
+
+    @property
+    def continuum_name(self):
+        try:
+            return self.config["fit"]["continuum_name"]
+        except KeyError:
+            return self.CONTINUUM_LIST[self.continuum]
+
+    @continuum_name.setter
+    def continuum_name(self, name):
+        self.config["fit"]["continuum"] = self.CONTINUUM_LIST.index(name)
+        self.config["fit"]["continuum_name"] = name
+
+    def _initializeConfig(self):
+        super()._initializeConfig()
+        self.continuum = self.continuum  # verify continuum 
name and index + + @property + def _continuumCacheParams(self): + cfgfit = self.config["fit"] + continuum = self.continuum_name + params = [id(self._lastDataCacheParams), continuum] + if continuum == "Linear Polynomial": + params.append(cfgfit["linpolorder"]) + elif continuum == "Exp. Polynomial": + params.append(cfgfit["exppolorder"]) + return params + + def _refreshContinuumCache(self): + contparams = self._continuumCacheParams + if self._lastContinuumCacheParams == contparams: + return # the cached data is still valid + + # Instantiate the continuum model + continuum = self.continuum_name + if continuum is None: + model = None + elif continuum == "Constant": + model = LinearPolynomialModel(degree=0, maxiter=10) + elif continuum == "Linear": + model = LinearPolynomialModel(degree=1, maxiter=10) + elif continuum == "Parabolic": + model = LinearPolynomialModel(degree=2, maxiter=10) + elif continuum == "Linear Polynomial": + model = LinearPolynomialModel( + degree=self.config["fit"]["linpolorder"], maxiter=10 + ) + elif continuum == "Exp. Polynomial": + model = ExponentialPolynomialModel( + degree=self.config["fit"]["exppolorder"], maxiter=40 + ) + else: + raise ValueError("Unknown continuum {}".format(continuum)) + self._continuumModel = model + + # Estimate the polynomial coefficients by fitting the numerical background + if model is not None: + model.xdata = self.xpol + model.ydata = self.ynumbkg() + result = model.fit() + model.use_fit_result(result) + + self._lastContinuumCacheParams = contparams + + @property + def xpol(self): + return self._channelsToXpol(self.xdata) + + def _channelsToXpol(self, x): + raise NotImplementedError + + +class McaTheory(McaTheoryBackground, McaTheoryLegacyApi, LeastSquaresFitModel): + """Model for MCA data""" + + BAND_GAP = 0.00385 # keV, silicon + GAUSS_SIGMA_TO_FWHM = 2 * numpy.sqrt(2 * numpy.log(2)) # 2.3548 + FULL_ATTENUATION = 1.0e-300 # intensity assumed to be zero + SCATTER_ENERGY_THRESHOLD = 0.2 # keV + + def __init__(self, **kw): + # TODO: done for some initialization of SpecfitFuns? + SpecfitFuns.fastagauss([1.0, 10.0, 1.0], numpy.arange(10.0)) + self.useFisxEscape(flag=False, apply=False) + + # XRF line groups + self._nLineGroups = 0 + self._lineGroups = [] + self._lineGroupAreas = [] + self._fluoRates = [] + self._escapeLineGroups = [] + self._lastAreasCacheParams = None + + # Misc + self._last_fit_result = None + + super().__init__(**kw) + + def useFisxEscape(self, flag=None, apply=True): + """Make sure the model uses fisx to calculate the escape peaks + when possible. 
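+
+        :param bool flag: when true and fisx is available, escape peak
+            ratios are computed through fisx (with its escape cache
+            enabled when supported)
+        :param bool apply: when true, reconfigure the model immediately
+            so the new setting takes effect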
+ """ + if flag and FISX: + if ConcentrationsTool.FisxHelper.xcom is None: + FisxHelper.xcom = xcom = FisxHelper.getElementsInstance() + else: + xcom = ConcentrationsTool.FisxHelper.xcom + if hasattr(xcom, "setEscapeCacheEnabled"): + xcom.setEscapeCacheEnabled(1) + self._useFisxEscape = True + else: + self._useFisxEscape = False + else: + self._useFisxEscape = False + if apply: + self.configure() + + def _initializeConfig(self): + super()._initializeConfig() + self.linearfitflag = self.config["fit"][ + "linearfitflag" + ] # done to synchronize parameter_types + self._preCalculateParameterIndependent() + self._preCalculateParameterDependent() + + def _preCalculateParameterDependent(self): + """Pre-calculate things that depend on the fit parameters""" + pass + + def _preCalculateParameterIndependent(self): + """Pre-calculate things that do not depend on the fit parameters""" + self._preCalculateLineGroups() + + def _preCalculateLineGroups(self): + """Calculate fluorescence and escape rates for emission and scatter line groups""" + self._fluoRates = self._calcFluoRates() + self._calcFluoRateCorrections() + + # Line groups: nested lists + # line group + # -> emission/scattering line + # [energy, rate, line name] + # This is a filtered and normalized form of `_fluoRates` + self._lineGroups = list(self._getEmissionLines()) + self._lineGroups.extend(self._getScatterLines()) + self._lineGroupNames = list(self._getEmissionLineNames()) + self._lineGroupNames.extend(self._getScatterNames()) + self._nLineGroups = len(self._lineGroups) + self._lineGroupAreas = numpy.ones(self._nLineGroups) + + # Escape line groups: nested lists + # line group + # -> emission/scattering line + # -> escape line + # [energy, rate, escape name] + self._escapeLineGroups = [ + self._calcEscapePeaks([peak[0] for peak in peaks]) + for peaks in self._lineGroups + ] + + def _peakProfileParams( + self, hypermet=None, selected_groups=None, normalized_fit_parameters=False + ): + """Raw peak profile parameters of emission/scatter/escape peaks. + All parameters are defined in the energy domain. 
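+
+        Each row describes one peak. The columns are (area, position,
+        fwhm, eta) for the pseudo-Voigt case and (area, position, fwhm,
+        ST AreaR, ST SlopeR, LT AreaR, LT SlopeR, STEP HeightR) for the
+        hypermet case.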
+ + :param int hypermet: + :param list or None selected_groups: all groups when `None` + no groups when empty list (npeaks == 0) + :param bool normalized_fit_parameters: + :returns array: npeaks x npeakparams + """ + lineGroups = self._lineGroups + escapeLineGroups = self._escapeLineGroups + linegroup_areas = self.linegroup_areas + if selected_groups is not None: + if not isinstance(selected_groups, (list, tuple)): + selected_groups = [selected_groups] + lineGroups = [lineGroups[i] for i in selected_groups] + escapeLineGroups = [escapeLineGroups[i] for i in selected_groups] + linegroup_areas = [linegroup_areas[i] for i in selected_groups] + if hypermet is None: + hypermet = self._hypermet + if normalized_fit_parameters: + linegroup_areas = numpy.ones(len(linegroup_areas)) + + npeaks = sum(len(group) for group in lineGroups) + npeaks += sum(len(escgroup) for group in escapeLineGroups for escgroup in group) + if hypermet: + # area, position, fwhm, ST AreaR, ST SlopeR, LT AreaR, LT SlopeR, STEP HeightR + npeakparams = 8 + else: + # area, position, fwhm, eta + npeakparams = 4 + parameters = numpy.zeros((npeaks, npeakparams)) + if not parameters.size: + return parameters + + # Peak positions and areas + i = 0 + for group, escgroup, grouparea in zip( + lineGroups, escapeLineGroups, linegroup_areas + ): + if not escgroup: + escgroup = [[]] * len(group) + for (energy, rate, _), esclines in zip(group, escgroup): + peakarea = rate * grouparea + parameters[i, 0] = peakarea + parameters[i, 1] = energy + i += 1 + for escen, escrate, _ in esclines: + parameters[i, 0] = peakarea * escrate + parameters[i, 1] = escen + i += 1 + + # Area parameters from channel to energy domain + parameters[:, 0] *= self.gain + + # FWHM in the energy domain + parameters[:, 2] = self._peakFWHM(parameters[:, 1]) + + # Other peak shape parameters + if hypermet: + if normalized_fit_parameters: + shapeparams = [ + 1, + self.st_sloperatio, + 1, + self.lt_sloperatio, + 1, + ] + else: + shapeparams = [ + self.st_arearatio, + self.st_sloperatio, + self.lt_arearatio, + self.lt_sloperatio, + self.step_heightratio, + ] + else: + shapeparams = [self.eta_factor] + for i, param in enumerate(shapeparams, 3): + parameters[:, i] = param + + return parameters + + @independent_linear_parameter_group + def linegroup_areas(self): + """line group areas in the channel domain""" + self._refreshAreasCache() + return self._lineGroupAreas + + @linegroup_areas.setter + def linegroup_areas(self, values): + self._lineGroupAreas[:] = values + + @linegroup_areas.counter + def linegroup_areas(self): + return self._nLineGroups + + @linegroup_areas.constraints + def linegroup_areas(self): + fixed = Gefit.CFIXED, 0, 0 + positive = Gefit.CPOSITIVE, 0, 0 + return [positive if area else fixed for area in self.linegroup_areas] + + @property + def linegroup_names(self): + return self._lineGroupNames + + def _refreshAreasCache(self, force=False): + params = self._areasCacheParams + if self._lastAreasCacheParams == params and not force: + return # the cached data is still valid + self._estimateLineGroupAreas() + self._lastAreasCacheParams = params + + @property + def _areasCacheParams(self): + """Any change in these parameter will invalidate the cache""" + zero = int(self.zero * 1000) + gain = int(self.gain * 1000) + return self._dataCacheParams + (zero, gain, id(self._lineGroupAreas)) + + def _estimateLineGroupAreas(self): + """Estimated line group areas in the channel domain""" + if self.hasBackground: + ydata = self.ydata - self.ybackground() + else: + ydata = 
self.ydata + xenergy = self.xenergy + emin = xenergy.min() + emax = xenergy.max() + # Height of a peak with rate Ri and group area A (Gaussian approx.) + # Hi(cts) = A(cts) * Ri / (sqrt(2*pi)*sigma(cts)) + # sigma(cts) = FWHM(keV) / (gain*GAUSS_SIGMA_TO_FWHM) + # A(cts) = Hi(cts) / Ri * sqrt(2*pi) * FWHM(keV) / (gain*GAUSS_SIGMA_TO_FWHM) + # A(cts) = Hi(cts) / Ri * FWHM(keV) * factor + factor = numpy.sqrt(2 * numpy.pi) / (self.GAUSS_SIGMA_TO_FWHM * self.gain) + + lineGroups = self._lineGroups + escapeLineGroups = self._escapeLineGroups + linegroup_areas = self._lineGroupAreas + for i, (group, escgroup) in enumerate(zip(lineGroups, escapeLineGroups)): + selected_energy, _ = self.__select_highest_peak(group, escgroup, emin, emax) + if selected_energy: + chan = numpy.abs(xenergy - selected_energy).argmin() + height = ydata[chan] # omit rate to approx. compensate for peak overlap + fwhm = self._peakFWHM(selected_energy) # energy domain + linegroup_areas[i] = height * fwhm * factor # Gaussian + else: # no peak within [emin, emax] + linegroup_areas[i] = 0 # fixed at zero (see constraints) + + def __select_highest_peak(self, group, escgroup, emin, emax): + if not escgroup: + escgroup = [[]] * len(group) + selected_energy = 0 + selected_rate = 0 + for (peakenergy, rate, _), esclines in zip(group, escgroup): + if peakenergy >= emin and peakenergy <= emax: + if rate > selected_rate: + selected_energy = peakenergy + selected_rate = rate + for peakenergy, escrate, _ in esclines: + if peakenergy >= emin and peakenergy <= emax: + if rate * escrate > selected_rate: + selected_energy = peakenergy + selected_rate = rate * escrate + return selected_energy, selected_rate + + def klm_markers(self, xenergy=None): + if xenergy is None: + xenergy = self.xenergy + emin = xenergy.min() + emax = xenergy.max() + factor = numpy.sqrt(2 * numpy.pi) / self.GAUSS_SIGMA_TO_FWHM / self.gain + + lineGroups = self._lineGroups + escapeLineGroups = self._escapeLineGroups + linegroup_areas = self.linegroup_areas + linegroup_names = self.linegroup_names + + for group, escgroup, label, area in zip( + lineGroups, escapeLineGroups, linegroup_names, linegroup_areas + ): + if not escgroup: + escgroup = [[]] * len(group) + peakenergy, rate = self.__select_highest_peak(group, escgroup, emin, emax) + if not peakenergy: + continue # no peak within [emin, emax] + fwhm = self._peakFWHM(peakenergy) # energy domain + height = (rate * area) / (fwhm * factor) + yield peakenergy, height, label + for (peakenergy, rate, _), esclines in zip(group, escgroup): + fwhm = self._peakFWHM(peakenergy) # energy domain + height = (rate * area) / (fwhm * factor) + yield peakenergy, height, None + for peakenergy, escrate, _ in esclines: + fwhm = self._peakFWHM(peakenergy) # energy domain + height = (rate * escrate * area) / (fwhm * factor) + yield peakenergy, height, None + + def plot(self, title=None, markers=False): + import matplotlib.pyplot as plt + + plt.plot(self.xenergy, self.yfitdata, label="data") + plt.plot(self.xenergy, self.yfitmodel, label="fit") + plt.plot(self.xenergy, self.ypileup(), label="pileup") + if markers: + from matplotlib.pyplot import cm + + colors = iter(cm.rainbow(numpy.linspace(0, 1, self._nLineGroups))) + for energy, height, label in self.klm_markers(): + if label: + color = next(colors) + plt.text(energy, height, label, color=color) + plt.plot([energy, energy], [0, height], label=label, color=color) + + plt.yscale("log") + plt.ylim(1, None) + + plt.legend() + if title: + plt.title(title) + plt.show() + + def estimate(self): + 
self._refreshAreasCache(force=True)
+
+    def _evaluatePeakProfiles(
+        self,
+        xdata=None,
+        hypermet=None,
+        fast=True,
+        selected_groups=None,
+        normalized_fit_parameters=False,
+    ):
+        """Summed peak profiles of emission/scatter/escape peaks
+
+        :param array xdata: 1D array
+        :param int or None hypermet:
+        :param bool fast: use a lookup table for calculating exponentials
+        :param list or None selected_groups:
+        :param bool normalized_fit_parameters:
+        :returns array: same shape as xdata
+        """
+        parameters = self._peakProfileParams(
+            hypermet=hypermet,
+            selected_groups=selected_groups,
+            normalized_fit_parameters=normalized_fit_parameters,
+        )
+        if parameters.size == 0:
+            if xdata is None:
+                return numpy.zeros(self.ndata)
+            else:
+                return numpy.zeros(len(xdata))
+        if xdata is None:
+            xdata = self.xdata
+        energy = self._channelsToEnergy(xdata)
+        if hypermet is None:
+            hypermet = self._hypermet
+        if hypermet:
+            if fast:
+                return SpecfitFuns.fastahypermet(parameters, energy, hypermet)
+            else:
+                return SpecfitFuns.ahypermet(parameters, energy, hypermet)
+        else:
+            return SpecfitFuns.apvoigt(parameters, energy)
+
+    def _peakFWHM(self, energy):
+        """Calculate the FWHM of a peak in the energy domain"""
+        return numpy.sqrt(
+            self.noise * self.noise
+            + self.BAND_GAP
+            * energy
+            * self.fano
+            * self.GAUSS_SIGMA_TO_FWHM
+            * self.GAUSS_SIGMA_TO_FWHM
+        )
+
+    def _getEmissionLines(self):
+        """Yields a list of emission lines for each group with total
+        rate of 1 and sorted by energy.
+
+        :yields list: [[energy, rate, "name"],
+                       [energy, rate, "name"],
+                       ...]
+        """
+        for group in sorted(self._emissionGroups()):
+            yield self._getGroupEmissionLines(*group)
+
+    def _getEmissionLineNames(self):
+        for _, symb, line in sorted(self._emissionGroups()):
+            yield symb + " " + line
+
+    def _getScatterLines(self):
+        """Yields a single-line list for the elastic and the inelastic
+        peak of each source line.
+
+        :yields list: [[energy, 1.0, "Scatter %03d"]]
+        """
+        scatteringAngle = numpy.radians(self._scatteringAngle)
+        angleFactor = 1.0 - numpy.cos(scatteringAngle)
+        for i, (en_elastic, _) in enumerate(self._scatterLines()):
+            en_inelastic = en_elastic / (1.0 + (en_elastic / 511.0) * angleFactor)
+            yield [[en_elastic, 1.0, "Scatter %03d" % i]]
+            yield [[en_inelastic, 1.0, "Scatter %03d" % i]]
+
+    def _getScatterNames(self):
+        for i in range(self._nRayleighLines):
+            yield "elastic" + str(i)
+            yield "inelastic" + str(i)
+
+    def _calcFluoRates(self):
+        """Fluorescence rate for each emission line of each element.
+        Rate means fluorescence intensity divided by primary intensity.
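+        Returns None when no matrix is defined (at most one source line
+        in that case); tabulated rates are then used downstream.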
+ + :returns None or dict: + """ + if self._matrix: + if self._maxEnergy: + multilayer = list(self._multilayer()) + if not multilayer: + text = "Your matrix is not properly defined.\n" + text += "If you used the graphical interface,\n" + text += "Please check the MATRIX tab" + raise ValueError(text) + + emissiongroups = sorted(self._emissionGroups()) + energylist, weightlist, scatterlist = zip(*self._sourceLines()) + detector = self._detector + attenuatorlist = list(self._attenuators(detectorfilters=True)) + userattenuatorlist = list(self._userAttenuators()) + funnyfilters = list(self._attenuators(detectorfunnyfilters=True)) + filterlist = list(self._attenuators(beamfilters=True)) + alphain = self._angleIn + alphaout = self._angleOut + return Elements.getMultilayerFluorescence( + multilayer, + energylist, + layerList=None, + weightList=weightlist, + fulloutput=1, + attenuators=attenuatorlist, + alphain=alphain, + alphaout=alphaout, + elementsList=emissiongroups, + cascade=True, + detector=detector, + funnyfilters=funnyfilters, + beamfilters=filterlist, + forcepresent=1, + userattenuators=userattenuatorlist, + ) + else: + text = "Invalid energy for matrix configuration.\n" + text += "Please check your BEAM parameters." + raise ValueError(text) + else: + if self._nSourceLines > 1: + raise ValueError("Multiple energies require a matrix definition") + else: + return None + + def _calcFluoRateCorrections(self): + """Higher-order interaction corrections on the fluorescence rates. + This will not be needed once fisx replaces the Elements module. + """ + fisxcfg = self.config["fisx"] = {} + if not FISX: + return + secondary = self.config["concentrations"].get("usemultilayersecondary", False) + if secondary: + corrections = FisxHelper.getFisxCorrectionFactorsFromFitConfiguration( + self.config, elementsFromMatrix=False + ) + fisxcfg["corrections"] = corrections + fisxcfg["secondary"] = secondary + + def _getGroupEmissionLines(self, Z, symb, groupname): + """Return a list of emission lines with total rate of 1 and + sorted by energy. + + :param int Z: atomic number + :param str symb: for example "Fe" + :param str groupname: for example "K" + :returns list: [[energy, rate, "name"], + [energy, rate, "name"], + ...] + """ + if self._fluoRates is None: + groups = Elements.Element[symb] + else: + groups = self._fluoRates[0][symb] + + peaks = [] + lines = groups.get(groupname + " xrays", dict()) + if not lines: + return peaks + for line in lines: + lineinfo = groups[line] + if lineinfo["rate"] > 0.0: + peaks.append([lineinfo["energy"], lineinfo["rate"], line]) + + if self._fluoRates is None: + self._applyAttenuation(peaks, symb) + + totalrate = sum(peak[1] for peak in peaks) + if not totalrate: + text = "Intensity of %s %s is zero\n" % (symb, groupname) + text += "Too high attenuation?" + raise ZeroDivisionError(text) + for peak in peaks: + peak[1] /= totalrate + + ethreshold = self.config["fit"]["deltaonepeak"] + return Elements._filterPeaks( + peaks, + ethreshold=ethreshold, + ithreshold=0.0005, + nthreshold=None, + keeptotalrate=True, + ) + + def _applyAttenuation(self, peaks, symb): + """Apply attenuation of primary and secondary beams. + No high-order interactions are taken into account. + Only 1 primary beam energy can be used. 
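+        Rates attenuated below FULL_ATTENUATION are forced to zero.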
+ """ + self._applyMatrixAttenuation(peaks, symb) + self._applyBeamFilterAttenuation(peaks, symb) + self._applyDetectorFilterAttenuation(peaks, symb) + self._applyFunnyFilterAttenuation(peaks, symb) + self._applyDetectorAttenuation(peaks, symb) + for peak in peaks: + if peak[1] < self.FULL_ATTENUATION: + peak[1] = 0 + + def _iterLinearAttenuation(self, energies, **kw): + """Linear attenuation coefficients of matrix, detector, filters, ... + + :param list energies: + :param **kw: select the attenuator type to include + """ + for attenuator in self._attenuators(**kw): + formula, density, thickness, funnyfactor = attenuator + rhod = density * thickness + mu = Elements.getMaterialMassAttenuationCoefficients(formula, 1.0, energies) + if len(energies) != 1 and len(mu["total"]) == 1: + mu = mu["total"] * len(energies) + else: + mu = mu["total"] + mulin = rhod * numpy.array(mu) + yield mulin, funnyfactor + + def _applyBeamFilterAttenuation(self, peaks, symb): + energies = Elements.Element[symb]["buildparameters"]["energy"] + if not energies: + raise ValueError("Invalid excitation energy") + + for mulin, _ in self._iterLinearAttenuation(energies, beamfilter=True): + transmission = numpy.exp(-mulin) + for peak, frac in zip(peaks, transmission): + peak[1] *= frac + + def _applyDetectorFilterAttenuation(self, peaks, symb): + energies = [peak[0] for peak in peaks] + for mulin, _ in self._iterLinearAttenuation(energies, detectorfilter=True): + transmission = numpy.exp(-mulin) + for peak, frac in zip(peaks, transmission): + peak[1] *= frac + + def _applyFunnyFilterAttenuation(self, peaks, symb): + firstfunnyfactor = None + energies = [peak[0] for peak in peaks] + for mulin, funnyfactor in self._iterLinearAttenuation( + energies, detectorfunnyfilter=True + ): + if (funnyfactor < 0.0) or (funnyfactor > 1.0): + text = ( + "Funny factor should be between 0.0 and 1.0., got %g" % funnyfactor + ) + raise ValueError(text) + transmission = numpy.exp(-mulin) + if firstfunnyfactor is None: + # only has to be multiplied once!!! 
+                firstfunnyfactor = funnyfactor
+                transmission = funnyfactor * transmission + (1.0 - funnyfactor)
+            else:
+                if abs(firstfunnyfactor - funnyfactor) > 0.0001:
+                    text = "All funny type attenuators must have the same opening fraction"
+                    raise ValueError(text)
+            for peak, frac in zip(peaks, transmission):
+                peak[1] *= frac
+
+    def _applyDetectorAttenuation(self, peaks, symb):
+        energies = [peak[0] for peak in peaks]
+        for mulin, _ in self._iterLinearAttenuation(energies, detector=True):
+            attenuation = 1.0 - numpy.exp(-mulin)
+            for peak, frac in zip(peaks, attenuation):
+                peak[1] *= frac
+
+    def _applyMatrixAttenuation(self, peaks, symb):
+        matrix = self._matrix
+        if not matrix:
+            return
+        maxenergy = Elements.Element[symb]["buildparameters"]["energy"]
+        if not maxenergy:
+            raise ValueError("Invalid excitation energy")
+        formula, density, thickness = matrix[:3]
+        alphaIn = self._angleIn
+        alphaOut = self._angleOut
+
+        energies = [x[0] for x in peaks] + [maxenergy]
+        mu = Elements.getMaterialMassAttenuationCoefficients(formula, 1.0, energies)
+        sinAlphaIn = numpy.sin(numpy.radians(alphaIn))
+        sinAlphaOut = numpy.sin(numpy.radians(alphaOut))
+        sinRatio = sinAlphaIn / sinAlphaOut
+        muSource = mu["total"][-1]
+        muFluo = numpy.array(mu["total"][:-1])
+
+        transmission = 1.0 / (muSource + muFluo * sinRatio)
+        rhod = density * thickness
+        if rhod > 0.0 and abs(sinAlphaIn) > 0.0:
+            expterm = -(muSource / sinAlphaIn + muFluo / sinAlphaOut) * rhod
+            transmission *= 1.0 - numpy.exp(expterm)
+
+        for peak, frac in zip(peaks, transmission):
+            peak[1] *= frac
+
+    def _applyUserAttenuators(self, peaks):
+        for userattenuator in self.config["userattenuators"]:
+            if self.config["userattenuators"][userattenuator]["use"]:
+                transmission = Elements.getTableTransmission(
+                    self.config["userattenuators"][userattenuator],
+                    [x[0] for x in peaks],
+                )
+                for peak, frac in zip(peaks, transmission):
+                    peak[1] *= frac
+
+    def _calcEscapePeaks(self, energies):
+        """For each energy a list of escape peaks with total rate of 1
+        and sorted by energy.
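+        Ratios are taken from fisx when available, otherwise from the
+        legacy PyMca implementation (see _calcPymcaEscapeRatios).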
+
+        :param list energies:
+        :returns list: [[[energy, rate, "name"],
+                         [energy, rate, "name"],
+                         ...]]
+        """
+        if not self.config["fit"]["escapeflag"]:
+            return []
+        if self._useFisxEscape:
+            _logger.debug("Using fisx escape ratios")
+            return self._calcFisxEscapeRatios(energies)
+        else:
+            return self._calcPymcaEscapeRatios(energies)
+
+    def _calcFisxEscapeRatios(self, energies):
+        xcom = FisxHelper.xcom
+        detele = self.config["detector"]["detele"]
+        detector_composition = Elements.getMaterialMassFractions([detele], [1.0])
+        ethreshold = self.config["detector"]["ethreshold"]
+        ithreshold = self.config["detector"]["ithreshold"]
+        nthreshold = self.config["detector"]["nthreshold"]
+        xcom.updateEscapeCache(
+            detector_composition,
+            energies,
+            energyThreshold=ethreshold,
+            intensityThreshold=ithreshold,
+            nThreshold=nthreshold,
+        )
+
+        escape_peaks = []
+        for energy in energies:
+            epeaks = xcom.getEscape(
+                detector_composition,
+                energy,
+                energyThreshold=ethreshold,
+                intensityThreshold=ithreshold,
+                nThreshold=nthreshold,
+            )
+            epeaks = [
+                [epeakinfo["energy"], epeakinfo["rate"], name[:-3].replace("_", " ")]
+                for name, epeakinfo in epeaks.items()
+            ]
+            epeaks = Elements._filterPeaks(
+                epeaks,
+                ethreshold=ethreshold,
+                ithreshold=ithreshold,
+                nthreshold=nthreshold,
+                absoluteithreshold=True,
+                keeptotalrate=False,
+            )
+            escape_peaks.append(epeaks)
+        return escape_peaks
+
+    def _calcPymcaEscapeRatios(self, energies):
+        escape_peaks = []
+        detele = self.config["detector"]["detele"]
+        for energy in energies:
+            peaks = Elements.getEscape(
+                [detele, 1.0, 1.0],
+                energy,
+                ethreshold=self.config["detector"]["ethreshold"],
+                ithreshold=self.config["detector"]["ithreshold"],
+                nthreshold=self.config["detector"]["nthreshold"],
+            )
+            escape_peaks.append(peaks)
+        return escape_peaks
+
+    @property
+    def xenergy(self):
+        return self._channelsToEnergy(self.xdata)
+
+    def _channelsToEnergy(self, x):
+        return self.zero + self.gain * x
+
+    def _channelsToXpol(self, x):
+        return self.zero + self.gain * (x - x.mean())
+
+    @independent_linear_parameter_group
+    def linpol_coefficients(self):
+        if isinstance(self.continuumModel, LinearPolynomialModel):
+            return self.continuumModel.coefficients
+        else:
+            return list()
+
+    @linpol_coefficients.setter
+    def linpol_coefficients(self, values):
+        if isinstance(self.continuumModel, LinearPolynomialModel):
+            self.continuumModel.coefficients = values
+
+    @linpol_coefficients.counter
+    def linpol_coefficients(self):
+        continuum = self.continuum_name
+        if continuum is None:
+            return 0
+        elif continuum == "Constant":
+            return 1
+        elif continuum == "Linear":
+            return 2
+        elif continuum == "Parabolic":
+            return 3
+        elif continuum == "Linear Polynomial":
+            return self.config["fit"]["linpolorder"] + 1
+        else:
+            return 0
+
+    @nonlinear_parameter_group
+    def exppol_coefficients(self):
+        model = self.continuumModel
+        if isinstance(model, ExponentialPolynomialModel):
+            return model.coefficients
+        else:
+            return list()
+
+    @exppol_coefficients.setter
+    def exppol_coefficients(self, values):
+        model = self.continuumModel
+        if isinstance(model, ExponentialPolynomialModel):
+            model.coefficients = values
+
+    @exppol_coefficients.counter
+    def exppol_coefficients(self):
+        if self.continuum_name == "Exp. 
Polynomial": + return self.config["fit"]["exppolorder"] + 1 + else: + return 0 + + def _snip(self, signal, anchorslist): + """Apply SNIP filtering to a signal""" + _logger.debug("CALCULATING SNIP") + n = len(signal) + if len(anchorslist): + anchorslist.sort() + else: + anchorslist = [0, n - 1] + + bkg = 0.0 * signal + lastAnchor = 0 + cfg = self.config["fit"] + width = cfg["snipwidth"] + for anchor in anchorslist: + if (anchor > lastAnchor) and (anchor < len(signal)): + bkg[lastAnchor:anchor] = SpecfitFuns.snip1d( + signal[lastAnchor:anchor], width, 0 + ) + lastAnchor = anchor + if lastAnchor < len(signal): + bkg[lastAnchor:] = SpecfitFuns.snip1d(signal[lastAnchor:], width, 0) + return bkg + + def _strip(self, signal, anchorslist): + """Apply STRIP filtering to a signal""" + cfg = self.config["fit"] + niter = cfg["stripiterations"] + if niter <= 0: + return numpy.full_like(signal, signal.min()) + + _logger.debug("CALCULATING STRIP") + if (niter > 1000) and (cfg["stripwidth"] == 1): + bkg = SpecfitFuns.subac( + signal, cfg["stripconstant"], niter / 20, 4, anchorslist + ) + bkg = SpecfitFuns.subac( + bkg, cfg["stripconstant"], niter / 4, cfg["stripwidth"], anchorslist + ) + else: + bkg = SpecfitFuns.subac( + signal, cfg["stripconstant"], niter, cfg["stripwidth"], anchorslist + ) + if niter > 1000: + # make sure to get something smooth + bkg = SpecfitFuns.subac(bkg, cfg["stripconstant"], 500, 1, anchorslist) + else: + # make sure to get something smooth but with less than + # 500 iterations + bkg = SpecfitFuns.subac( + bkg, + cfg["stripconstant"], + int(cfg["stripwidth"] * 2), + 1, + anchorslist, + ) + return bkg + + def _smooth(self, y): + """Smooth a signal""" + try: + y = y.astype(dtype=numpy.float64) + w = self.config["fit"]["stripfilterwidth"] + ysmooth = SpecfitFuns.SavitskyGolay(y, w) + except Exception: + print("Unsuccessful Savitsky-Golay smoothing: %s" % sys.exc_info()) + raise + if ysmooth.size > 1: + fltr = [0.25, 0.5, 0.25] + ysmooth[1:-1] = numpy.convolve(ysmooth, fltr, mode=0) + ysmooth[0] = 0.5 * (ysmooth[0] + ysmooth[1]) + ysmooth[-1] = 0.5 * (ysmooth[-1] + ysmooth[-2]) + return ysmooth + + @nonlinear_parameter_group + def zero(self): + return self.config["detector"]["zero"] + + @zero.setter + def zero(self, value): + self.config["detector"]["zero"] = value + + @zero.constraints + def zero(self): + if self.config["detector"]["fixedzero"]: + return Gefit.CFIXED, 0, 0 + else: + value = self.zero + delta = self.config["detector"]["deltazero"] + return Gefit.CQUOTED, value - delta, value + delta + + @nonlinear_parameter_group + def gain(self): + return self.config["detector"]["gain"] + + @gain.setter + def gain(self, value): + self.config["detector"]["gain"] = value + + @gain.constraints + def gain(self): + if self.config["detector"]["fixedgain"]: + return Gefit.CFIXED, 0, 0 + else: + value = self.gain + delta = self.config["detector"]["deltagain"] + return Gefit.CQUOTED, value - delta, value + delta + + @nonlinear_parameter_group + def noise(self): + return self.config["detector"]["noise"] + + @noise.setter + def noise(self, value): + self.config["detector"]["noise"] = value + + @noise.constraints + def noise(self): + if self.config["detector"]["fixednoise"]: + return Gefit.CFIXED, 0, 0 + else: + value = self.noise + delta = self.config["detector"]["deltanoise"] + return Gefit.CQUOTED, value - delta, value + delta + + @nonlinear_parameter_group + def fano(self): + return self.config["detector"]["fano"] + + @fano.setter + def fano(self, value): + self.config["detector"]["fano"] = 
value
+
+    @fano.constraints
+    def fano(self):
+        if self.config["detector"]["fixedfano"]:
+            return Gefit.CFIXED, 0, 0
+        else:
+            value = self.fano
+            delta = self.config["detector"]["deltafano"]
+            return Gefit.CQUOTED, value - delta, value + delta
+
+    @dependent_linear_parameter_group
+    def sum(self):
+        if self.config["fit"]["sumflag"]:
+            return self.config["detector"]["sum"]
+        else:
+            return 0.0
+
+    @sum.setter
+    def sum(self, value):
+        self.config["detector"]["sum"] = value
+
+    @sum.constraints
+    def sum(self):
+        if self.config["detector"]["fixedsum"] or not self.config["fit"]["sumflag"]:
+            return Gefit.CFIXED, 0, 0
+        else:
+            value = self.sum
+            delta = self.config["detector"]["deltasum"]
+            return Gefit.CQUOTED, value - delta, value + delta
+
+    @nonlinear_parameter_group
+    def eta_factor(self):
+        return self.config["peakshape"]["eta_factor"]
+
+    @eta_factor.setter
+    def eta_factor(self, value):
+        self.config["peakshape"]["eta_factor"] = value
+
+    @eta_factor.counter
+    def eta_factor(self):
+        if self._hypermet:
+            return 0
+        else:
+            return 1
+
+    @eta_factor.constraints
+    def eta_factor(self):
+        if self.config["peakshape"]["fixedeta_factor"]:
+            return Gefit.CFIXED, 0, 0
+        else:
+            value = self.eta_factor
+            delta = self.config["peakshape"]["deltaeta_factor"]
+            return Gefit.CQUOTED, value - delta, value + delta
+
+    @dependent_linear_parameter_group
+    def step_heightratio(self):
+        if self._hypermetStep:
+            return self.config["peakshape"]["step_heightratio"]
+        else:
+            return 0
+
+    @step_heightratio.setter
+    def step_heightratio(self, value):
+        self.config["peakshape"]["step_heightratio"] = value
+
+    @step_heightratio.counter
+    def step_heightratio(self):
+        if self._hypermet:
+            return 1
+        else:
+            return 0
+
+    @step_heightratio.constraints
+    def step_heightratio(self):
+        if self.config["peakshape"]["fixedstep_heightratio"] or not self._hypermetStep:
+            return Gefit.CFIXED, 0, 0
+        else:
+            value = self.step_heightratio
+            delta = self.config["peakshape"]["deltastep_heightratio"]
+            return Gefit.CQUOTED, value - delta, value + delta
+
+    @nonlinear_parameter_group
+    def lt_sloperatio(self):
+        return self.config["peakshape"]["lt_sloperatio"]
+
+    @lt_sloperatio.setter
+    def lt_sloperatio(self, value):
+        self.config["peakshape"]["lt_sloperatio"] = value
+
+    @lt_sloperatio.counter
+    def lt_sloperatio(self):
+        if self._hypermet:
+            return 1
+        else:
+            return 0
+
+    @lt_sloperatio.constraints
+    def lt_sloperatio(self):
+        if self.config["peakshape"]["fixedlt_sloperatio"] or not self._hypermetLongTail:
+            return Gefit.CFIXED, 0, 0
+        else:
+            value = self.lt_sloperatio
+            delta = self.config["peakshape"]["deltalt_sloperatio"]
+            return Gefit.CQUOTED, value - delta, value + delta
+
+    @dependent_linear_parameter_group
+    def lt_arearatio(self):
+        if self._hypermetLongTail:
+            return self.config["peakshape"]["lt_arearatio"]
+        else:
+            return 0
+
+    @lt_arearatio.setter
+    def lt_arearatio(self, value):
+        self.config["peakshape"]["lt_arearatio"] = value
+
+    @lt_arearatio.counter
+    def lt_arearatio(self):
+        if self._hypermet:
+            return 1
+        else:
+            return 0
+
+    @lt_arearatio.constraints
+    def lt_arearatio(self):
+        if self.config["peakshape"]["fixedlt_arearatio"] or not self._hypermetLongTail:
+            return Gefit.CFIXED, 0, 0
+        else:
+            value = self.lt_arearatio
+            delta = self.config["peakshape"]["deltalt_arearatio"]
+            return Gefit.CQUOTED, value - delta, value + delta
+
+    @nonlinear_parameter_group
+    def st_sloperatio(self):
+        return self.config["peakshape"]["st_sloperatio"]
+
+    @st_sloperatio.setter
+    def st_sloperatio(self, value):
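+        # Stored in the configuration, like the other peak-shape ratios.
+        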
self.config["peakshape"]["st_sloperatio"] = value + + @st_sloperatio.counter + def st_sloperatio(self): + if self._hypermet: + return 1 + else: + return 0 + + @st_sloperatio.constraints + def st_sloperatio(self): + if ( + self.config["peakshape"]["fixedst_sloperatio"] + or not self._hypermetShortTail + ): + return Gefit.CFIXED, 0, 0 + else: + value = self.st_sloperatio + delta = self.config["peakshape"]["deltast_sloperatio"] + return Gefit.CQUOTED, value - delta, value + delta + + @dependent_linear_parameter_group + def st_arearatio(self): + if self._hypermetShortTail: + return self.config["peakshape"]["st_arearatio"] + else: + return 0 + + @st_arearatio.setter + def st_arearatio(self, value): + self.config["peakshape"]["st_arearatio"] = value + + @st_arearatio.counter + def st_arearatio(self): + if self._hypermet: + return 1 + else: + return 0 + + @st_arearatio.constraints + def st_arearatio(self): + if self.config["peakshape"]["fixedst_arearatio"] or not self._hypermetShortTail: + return Gefit.CFIXED, 0, 0 + else: + value = self.st_arearatio + delta = self.config["peakshape"]["deltast_arearatio"] + return Gefit.CQUOTED, value - delta, value + delta + + def _convert_parameter_names(self, names): + linegroup_names = None + for name in names: + if "linegroup_areas" in name: + if linegroup_names is None: + linegroup_names = self.linegroup_names + idx = int(name.replace("linegroup_areas", "")) + yield self._convert_line_name(linegroup_names[idx]) + elif "linpol_coefficients" in name: + if self.continuum < self.CONTINUUM_LIST.index("Linear Polynomial"): + idx = int(name.replace("linpol_coefficients", "")) + if idx == 0: + yield "Constant" + elif idx == 1: + yield "1st Order" + elif idx == 2: + yield "2nd Order" + else: + yield name.replace("linpol_coefficients", "A") + elif "exppol_coefficients" in name: + yield name.replace("linpol_coefficients", "A") + elif name == "st_arearatio": + yield "ST AreaR" + elif name == "st_sloperatio": + yield "ST SlopeR" + elif name == "lt_arearatio": + yield "LT AreaR" + elif name == "lt_sloperatio": + yield "LT SlopeR" + elif name == "step_heightratio": + yield "STEP HeightR" + elif name == "eta_factor": + yield "Eta Factor" + else: + yield name.capitalize() + + def _convert_line_name(self, name): + if "inelastic" in name: + i = int(name.replace("inelastic", "")) + return "Scatter Compton%03d" % i + elif "elastic" in name: + i = int(name.replace("elastic", "")) + return "Scatter Peak%03d" % i + return name + + def get_parameter_names(self, **paramtype): + return tuple( + self._convert_parameter_names(super().get_parameter_names(**paramtype)) + ) + + def evaluate_fitmodel(self, xdata=None): + return self.mcatheory(xdata=xdata) + + def mcatheory( + self, + xdata=None, + hypermet=None, + continuum=True, + summing=True, + selected_groups=None, + normalized_fit_parameters=False, + ): + """Evaluate to MCA model (does not include the numerical background) + + y(x) = ycont(P(x)) + A1*F1(E(x)) + A2*F2(E(x)) + ... + + x: MCA channels (positive integers) + + ycont(x) = 0 # no analytical background + = c0 + c1*x + c2*x^2 + ... # linear polynomial + = c0 * exp[c1*x + c2*x^2 + ...] 
+
+        E(x) = zero + gain*x
+        P(x) = E(x - mean(x))
+
+        Fi(x): emission, scatter or escape peak
+
+        Hypermet:
+
+        A*F(x) = A * Gnorm(x, u, s)
+               + st_arearatio*A * Tnorm(x, u, s, st_sloperatio)
+               + lt_arearatio*A * Tnorm(x, u, s, lt_sloperatio)
+               + step_heightratio*A * u/(sqrt(2*pi)*s) * Snorm(x, u, s)
+
+        A: the area of the gaussian part, not the entire peak
+
+        Gaussian is normalized in ]-inf, inf[
+        Gnorm(x, u, s) = exp[-(x-u)^2/(2*s^2)] / (sqrt(2*pi)*s)
+
+        Step is normalized in [0, inf[
+        Snorm(x, u, s) = erfc[(x-u)/(sqrt(2)*s)] / (2 * u)
+
+        Tail is normalized in ]-inf, inf[
+        Tnorm(x, u, s, r) = erfc[(x-u)/(sqrt(2)*s) + s/(sqrt(2)*r)]
+                          * exp[(x-u)/r + (s/r)^2/2] / (2 * r)
+
+        :param array xdata:
+        :param int hypermet:
+        :param bool continuum:
+        :param bool summing:
+        :param list selected_groups:
+        :param bool normalized_fit_parameters:
+        :returns array:
+        """
+        # Emission lines, scatter peaks and escape peaks
+        y = self._evaluatePeakProfiles(
+            xdata=xdata,
+            hypermet=hypermet,
+            selected_groups=selected_groups,
+            normalized_fit_parameters=normalized_fit_parameters,
+        )
+
+        # Analytical background
+        if continuum and self.continuumModel is not None:
+            y += self.ycontinuum(xdata=xdata)
+
+        # Pile-up
+        if summing and self.sum:
+            y += self.ypileup(ymodel=y, xdata=xdata)
+
+        return y
+
+    def ypileup(self, ymodel=None, xdata=None, normalized_fit_parameters=False):
+        """The ymodel contains the peaks and the continuum."""
+        pileupfactor = self.sum
+        if not pileupfactor:
+            if xdata is None:
+                return numpy.zeros(self.ndata)
+            else:
+                return numpy.zeros(len(xdata))
+        if ymodel is None:
+            ymodel = self.mcatheory(xdata=xdata, summing=False)
+        if xdata is None:
+            xmin = self.xmin
+        else:
+            xmin = min(xdata)
+        if normalized_fit_parameters:
+            pileupfactor = 1
+        return pileupfactor * SpecfitFuns.pileup(
+            ymodel, int(xmin), self.zero, self.gain, 0
+        )
+
+    def _y_full_to_fit(self, y, xdata=None):
+        """The fitting is done after subtracting the numerical background"""
+        if self.hasNumBkg:
+            y = y - self.ynumbkg(xdata=xdata)
+        if self.parameter_types == ParameterType.independent_linear and self.hasPileUp:
+            # Note: this is not strictly correct but we have
+            # no other choice until the pileup is implemented
+            # properly (emission line combinations instead of convolution)
+            y = y - self.ypileup(xdata=xdata)
+        return y
+
+    @property
+    def hasPileUp(self):
+        return bool(self.sum)
+
+    def _y_fit_to_full(self, y, xdata=None):
+        """The numerical background is not included in the fit model"""
+        if self.hasNumBkg:
+            return y + self.ynumbkg(xdata=xdata)
+        else:
+            return y
+
+    def derivative_fitmodel(self, param_idx, xdata=None, **paramtype):
+        """Derivative of the fit model with respect to a specific parameter
+
+        :param int param_idx:
+        :param array xdata: shape (ndata,)
+        :returns array: shape (ndata,)
+        """
+        group = self._group_from_parameter_index(param_idx, **paramtype)
+        name = group.property_name
+        if name == "st_arearatio":
+            return self.mcatheory(
+                xdata=xdata, hypermet=2, continuum=False, normalized_fit_parameters=True
+            )
+        elif name == "lt_arearatio":
+            return self.mcatheory(
+                xdata=xdata, hypermet=4, continuum=False, normalized_fit_parameters=True
+            )
+        elif name == "step_heightratio":
+            return self.mcatheory(
+                xdata=xdata, hypermet=8, continuum=False, normalized_fit_parameters=True
+            )
+        elif name == "linegroup_areas":
+            i = group.parameter_index_in_group(param_idx)
+            return self.mcatheory(
+                selected_groups=[i],
+                normalized_fit_parameters=True,
+                xdata=xdata,
+                continuum=False,
+            )
+        elif name 
== "sum": + return self.ypileup(xdata=xdata, normalized_fit_parameters=True) + elif name == "linpol_coefficients": + model = self.continuumModel + i = group.parameter_index_in_group(param_idx) + xpol = xdata + if xpol is not None: + xpol = self._channelsToXpol(xpol) + deriv = model.derivative_fitmodel(i, xdata=xpol, **paramtype) + if self.sum: + deriv = deriv + self.ypileup(ymodel=deriv, xdata=xdata) + return deriv + else: + return self.numerical_derivative_fitmodel( + param_idx, xdata=xdata, **paramtype + ) + + @property + def maxiter(self): + return self.config["fit"]["maxiter"] + + @property + def deltachi(self): + return self.config["fit"]["deltachi"] + + @property + def weightflag(self): + return self.config["fit"]["fitweight"] + + @linked_property + def linearfitflag(self): + linearfitflag = int(self.parameter_types == ParameterType.independent_linear) + self.config["fit"]["linearfitflag"] = linearfitflag + return linearfitflag + + @linearfitflag.setter + def linearfitflag(self, value): + if value: + self.parameter_types = ParameterType.independent_linear + else: + self.parameter_types = AllParameterTypes + + @contextmanager + def linear_context(self, linear=None): + keep = self.linearfitflag + if linear is not None: + self.linearfitflag = linear + try: + yield + finally: + self.linearfitflag = keep + + @property + def parameter_types(self): + return super(type(self), type(self)).parameter_types.fget(self) + + @parameter_types.setter + def parameter_types(self, value): + super(type(self), type(self)).parameter_types.fset(self, value) + self.config["fit"]["linearfitflag"] = int( + bool(self.parameter_types & ParameterType.independent_linear) + ) + + @contextmanager + def _custom_iterative_optimization_context(self): + with super()._custom_iterative_optimization_context(): + if abs(self.zero) < 1.0e-10: + self.zero = 0.0 + yield + + @contextmanager + def _propertyCachingContext(self, **kw): + with super()._propertyCachingContext(**kw) as values_cache: + with self._link_continuum_model_cache(): + yield values_cache + + @contextmanager + def _link_continuum_model_cache(self): + model = self.continuumModel + if model is None: + yield + return + keep = model.coefficients + try: + # Replace the model's coefficient storage by the cache view + if isinstance(model, LinearPolynomialModel): + model._coefficients = self.linpol_coefficients + else: + model._coefficients = self.exppol_coefficients + yield + finally: + model._coefficients = keep + + def startFit(self, digest=False, linear=None): + """Fit with legacy output""" + with self.linear_context(linear=linear): + result = self.fit(full_output=True) + self._last_fit_result = result + if digest: + return self._legacyresult(result), self.digestresult() + else: + return self._legacyresult(result) + + @staticmethod + def _legacyresult(result): + return ( + result["parameters"], + result["chi2_red"], + result["uncertainties"], + result["niter"], + result["lastdeltachi"], + ) + + def digestresult(self, outfile=None, info=None): + return self._digestresult(outfile=outfile, info=info, full=True) + + def imagingDigestResult(self): + return self._digestresult(full=False) + + def _digestresult(self, outfile=None, info=None, full=True): + with self.use_fit_result_context(self._last_fit_result): + + parameter_names = self.get_parameter_names() + parameter_std = self._last_fit_result["uncertainties"] + + chisq = self._last_fit_result["chi2_red"] + + if full: + xenergy = self.xenergy + yfit = self.yfullmodel + ydata = self.ydata + weighted_residuals = (ydata 
- yfit) / self.ystd
+                prechisq = weighted_residuals ** 2
+
+                result = {
+                    "xdata": self.xdata,
+                    "energy": xenergy,
+                    "ydata": ydata,
+                    "continuum": self.ybackground(),
+                    "yfit": yfit,
+                    "pileup": self.ypileup(),
+                    "parameters": parameter_names,
+                    "fittedpar": self._last_fit_result["parameters"],
+                    "sigmapar": parameter_std,
+                    "chisq": chisq,
+                    "lastdeltachi": self._last_fit_result["lastdeltachi"],
+                    "niter": self._last_fit_result["niter"],
+                    "config": self.config.copy(),
+                }
+
+                statistics_info = {
+                    "xenergy": xenergy,
+                    "ydata": ydata,
+                    "yfit": yfit,
+                    "prechisq": prechisq,
+                }
+            else:
+                statistics_info = None
+                result = {"chisq": chisq}
+            linegroup_result = self._digestLineGroupResults(
+                parameter_names, parameter_std, statistics_info=statistics_info
+            )
+            result.update(linegroup_result)
+            if outfile:
+                self.saveDigestedResult(result, outfile, info=info)
+
+            return result
+
+    def _digestLineGroupResults(
+        self, parameter_names, parameter_std, statistics_info=None
+    ):
+        full = statistics_info is not None
+        result = dict()
+        groups = result["groups"] = list()
+        lineGroups = self._lineGroups
+        escapeLineGroups = self._escapeLineGroups
+        linegroup_areas = self.linegroup_areas
+        linegroup_names = self.linegroup_names
+
+        area_multiplier = abs(1 + self.st_arearatio)
+        if statistics_info:
+            statistics_info["nfree_parameters"] = self.nfree_parameters
+            ndata = self.ndata
+
+        for igroup, (group, escgroup, groupname, area) in enumerate(
+            zip(lineGroups, escapeLineGroups, linegroup_names, linegroup_areas)
+        ):
+            groupname = self._convert_line_name(groupname)
+            groups.append(groupname)
+
+            # Total group area with error (in the channel domain)
+            pidx = parameter_names.index(groupname)
+            group_fitarea = area * area_multiplier
+            group_fitarea_std = parameter_std[pidx] * area_multiplier
+            result[groupname] = {
+                "fitarea": group_fitarea,
+                "sigmaarea": group_fitarea_std,
+            }
+            if not full:
+                result[groupname]["peaks"] = [linename for (_, _, linename) in group]
+                continue
+
+            # Total group spectrum
+            yfit_group = self.mcatheory(
+                selected_groups=[igroup],
+                continuum=False,
+            )
+            result["y" + groupname] = yfit_group
+
+            # Information per line
+            peaks = result[groupname]["peaks"] = list()
+            escapepeaks = result[groupname]["escapepeaks"] = list()
+            ydatarest_group = 0
+            group_channel_mask = numpy.zeros(ndata, dtype=bool)
+            if not escgroup:
+                escgroup = [[]] * len(group)
+            for (energy, rate, linename), esclines in zip(group, escgroup):
+                line_result, line_channel_mask, ydatarest_line = self._digestLineResult(
+                    energy,
+                    rate,
+                    yfit_group,
+                    group_fitarea,
+                    group_fitarea_std,
+                    statistics_info,
+                )
+                line_result["ratio"] = rate
+                peaks.append(linename)
+                result[groupname][linename] = line_result
+
+                group_channel_mask |= line_channel_mask
+                ydatarest_group += ydatarest_line
+
+                for escenergy, escrate, escname in esclines:
+                    (
+                        line_result,
+                        line_channel_mask,
+                        ydatarest_line,
+                    ) = self._digestLineResult(
+                        escenergy,
+                        escrate * rate,
+                        yfit_group,
+                        group_fitarea,
+                        group_fitarea_std,
+                        statistics_info,
+                    )
+                    line_result["ratio"] = escrate
+                    escname = "{} {}".format(linename, escname.replace(" ", "_"))
+                    escapepeaks.append(escname)
+                    escname += "esc"
+                    result[groupname][escname] = line_result
+
+                    group_channel_mask |= line_channel_mask
+                    ydatarest_group += ydatarest_line
+
+            # Line group statistics
+            yfitrest_group = statistics_info["yfit"] - yfit_group
+            ydata_group = statistics_info["ydata"] - yfitrest_group
+            group_dataarea = ydata_group[group_channel_mask].sum()
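+            # mcaarea: data counts in the union of the per-line windows
+            # (each spanning +/- 3 sigma around its line energy), after
+            # removing the fitted contribution of all other groups.
+            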
result[groupname]["mcaarea"] = group_dataarea + result[groupname]["statistics"] = ydatarest_group + max( + group_dataarea, group_fitarea + ) + + return result + + def _digestLineResult( + self, + energy, + rate, + yfit_group, + group_fitarea, + group_fitarea_std, + statistics_info, + ): + fwhm = self._peakFWHM(energy) + denergy = 3 * fwhm / self.GAUSS_SIGMA_TO_FWHM + line_channel_mask = (statistics_info["xenergy"] >= (energy - denergy)) & ( + statistics_info["xenergy"] <= (energy + denergy) + ) + + ndof = line_channel_mask.sum() - statistics_info["nfree_parameters"] + chisq = statistics_info["prechisq"][line_channel_mask].sum() / ndof + + total_ydata_lineroi = statistics_info["ydata"][line_channel_mask].sum() + total_ygroup_lineroi = yfit_group[line_channel_mask].sum() + ydatarest_line = rate * abs(total_ydata_lineroi - total_ygroup_lineroi) + + line_result = { + "energy": energy, + "fwhm": fwhm, + "fitarea": group_fitarea * rate, + "sigmaarea": group_fitarea_std * rate, + "statistics": total_ydata_lineroi, + "chisq": chisq, + } + + return line_result, line_channel_mask, ydatarest_line + + @staticmethod + def saveDigestedResult(result: dict, filename: str, info=None): + try: + os.remove(filename) + except Exception: + pass + if info is None: + dcfg = ConfigDict.ConfigDict({"result": result}) + else: + dcfg = ConfigDict.ConfigDict({"result": result, "info": info}) + dcfg.write(filename) + + +class MultiMcaTheory(LeastSquaresCombinedFitModel): + def __init__(self, ndetectors=1): + models = {f"detector{i}": McaTheory() for i in range(ndetectors)} + super().__init__(models) + # self._enable_property_link("concentrations") diff --git a/PyMca5/PyMcaPhysics/xrf/XRFMC/XRFMCHelper.py b/PyMca5/PyMcaPhysics/xrf/XRFMC/XRFMCHelper.py index 1c3387862..ae722fed0 100644 --- a/PyMca5/PyMcaPhysics/xrf/XRFMC/XRFMCHelper.py +++ b/PyMca5/PyMcaPhysics/xrf/XRFMC/XRFMCHelper.py @@ -232,7 +232,7 @@ def getXRFMCCorrectionFactors(fitConfiguration, xmimsim_pymca=None, verbose=Fals else: # for the time being we have to build a "fit-like" file with the information import numpy - from PyMca5.PyMca import ClassMcaTheory + from PyMca5.PyMca import LegacyMcaTheory fitConfiguration['fit']['linearfitflag']=1 fitConfiguration['fit']['stripflag']=0 fitConfiguration['fit']['stripiterations']=0 @@ -241,7 +241,7 @@ def getXRFMCCorrectionFactors(fitConfiguration, xmimsim_pymca=None, verbose=Fals #xdata = numpy.arange(xmin, xmax + 1) * 1.0 xdata = numpy.arange(0, xmax + 1) * 1.0 ydata = 0.0 + 0.1 * xdata - mcaFit = ClassMcaTheory.McaTheory() + mcaFit = LegacyMcaTheory.McaTheory() mcaFit.configure(fitConfiguration) #a dummy time dummyTime = 1.0 diff --git a/PyMca5/tests/CachingModelTest.py b/PyMca5/tests/CachingModelTest.py new file mode 100644 index 000000000..44f14ea13 --- /dev/null +++ b/PyMca5/tests/CachingModelTest.py @@ -0,0 +1,175 @@ +import unittest +import numpy +import functools +from collections import Counter +from PyMca5.PyMcaMath.fitting.model.CachingModel import CachedPropertiesModel +from PyMca5.PyMcaMath.fitting.model.CachingModel import CachingModel +from PyMca5.PyMcaMath.fitting.model.CachingModel import cached_property + + +class Cached(CachedPropertiesModel): + def __init__(self): + super().__init__() + self._cfg = {"var1": 1, "var2": 2} + self.reset_counters() + + def reset_counters(self): + self.get_counter = Counter() + self.set_counter = Counter() + + @cached_property + def var1(self): + self.get_counter["var1"] += 1 + return self._cfg.get("var1") + + @var1.setter + def var1(self, value): + 
self.set_counter["var1"] += 1 + self._cfg["var1"] = value + + @cached_property + def var2(self): + self.get_counter["var2"] += 1 + return self._cfg.get("var2") + + @var2.setter + def var2(self, value): + self.set_counter["var2"] += 1 + self._cfg["var2"] = value + + def _property_index_from_id(self, name): + if name == "var1": + return 0 + else: + return 1 + + def _create_empty_property_values_cache(self, key, **_): + return numpy.zeros(2, dtype=float) + + +class ExternalCached(CachingModel): + def _property_index_from_id(self, name): + if name == "var1": + return 2 + elif name == "var2": + return 3 + else: + raise ValueError(name) + + def _create_empty_property_values_cache(self, key, **_): + return numpy.zeros(4, dtype=float) + + +def external_subtests(method): + @functools.wraps(method) + def wrapper(self): + with self.subTest("no external cache"): + method(self) + with self.subTest("with external cache"): + self.cached_object = Cached() + self.cached_object._cache_manager = ExternalCached() + method(self) + + return wrapper + + +class testCachedInterface(unittest.TestCase): + def setUp(self): + self.cached_object = Cached() + + @external_subtests + def test_get_without_caching(self): + """verify property get/set count when getting a cached property value""" + for i in range(5): + self.assertEqual(self.cached_object.var1, 1) + self._assertGetSetCount("var1", i + 1, 0) + self._assertGetSetCount("var2", 0, 0) + + @external_subtests + def test_set_without_caching(self): + """verify property get/set count when setting a cached property value""" + for i in range(5): + self.cached_object.var1 = 100 + self._assertGetSetCount("var1", 0, i + 1) + self._assertGetSetCount("var2", 0, 0) + self._assertVarValues(100, 2) + + @external_subtests + def test_get_with_caching(self): + """verify property get/set count when getting a cached property value""" + with self.cached_object._propertyCachingContext() as cache: + self._assertCache(cache, [1, 2]) + for i in range(5): + self.assertEqual(self.cached_object.var1, 1) + self._assertGetSetCount("var1", 1, 0) + self._assertGetSetCount("var2", 1, 0) + + @external_subtests + def test_set_with_caching(self): + """verify property get/set count when setting a cached property value""" + with self.cached_object._propertyCachingContext() as cache: + for i in range(5): + self.cached_object.var1 = 100 + self.assertEqual(self.cached_object.var1, 100) + self._assertCache(cache, [100, 2]) + self._assertGetSetCount("var1", 1, 0) + self._assertGetSetCount("var2", 1, 0) + self._assertVarValues(1, 2) + + @external_subtests + def test_set_with_persistent_caching(self): + """verify cache persistency""" + with self.cached_object._propertyCachingContext(persist=True) as cache: + for i in range(5): + self.cached_object.var1 = 100 + self.assertEqual(self.cached_object.var1, 100) + self._assertCache(cache, [100, 2]) + self._assertGetSetCount("var1", 1, 1) + self._assertGetSetCount("var2", 1, 1) + self._assertVarValues(100, 2) + + @external_subtests + def test_start_cache(self): + """verify cache initialization""" + with self.cached_object._propertyCachingContext( + start_cache=self._start_cache([100, 200]), + ) as cache: + self._assertCache(cache, [100, 200]) + self.assertEqual(self.cached_object.var1, 100) + self.assertEqual(self.cached_object.var2, 200) + self._assertGetSetCount("var1", 0, 0) + self._assertGetSetCount("var2", 0, 0) + self._assertVarValues(1, 2) + + @external_subtests + def test_persistent_start_cache(self): + """verify cache persistency""" + with 
self.cached_object._propertyCachingContext( + start_cache=self._start_cache([100, 200]), persist=True + ) as cache: + self._assertCache(cache, [100, 200]) + self._assertGetSetCount("var1", 0, 1) + self._assertGetSetCount("var2", 0, 1) + self._assertVarValues(100, 200) + + def _assertGetSetCount(self, name, getcount, setcount): + self.assertEqual(self.cached_object.get_counter[name], getcount) + self.assertEqual(self.cached_object.set_counter[name], setcount) + + def _assertVarValues(self, v1, v2): + self.assertEqual(self.cached_object.var1, v1) + self.assertEqual(self.cached_object.var2, v2) + + def _assertCache(self, cache, values): + if not isinstance(cache, list): + cache = cache.tolist() + if self.cached_object._cache_manager is self.cached_object: + self.assertEqual(cache, values) + else: + self.assertEqual(cache, [0, 0] + values) + + def _start_cache(self, start_cache): + if self.cached_object._cache_manager is self.cached_object: + return start_cache + else: + return [0, 0] + start_cache diff --git a/PyMca5/tests/FitPolModelTest.py b/PyMca5/tests/FitPolModelTest.py new file mode 100644 index 000000000..38511c6b0 --- /dev/null +++ b/PyMca5/tests/FitPolModelTest.py @@ -0,0 +1,100 @@ +import unittest +import numpy +from PyMca5.PyMcaMath.fitting.model import PolynomialModels +from PyMca5.PyMcaMath.fitting.model.ParameterModel import ParameterType +from PyMca5.PyMcaMath.fitting.model.ParameterModel import AllParameterTypes + + +class testFitPolModel(unittest.TestCase): + def setUp(self): + self.random_state = numpy.random.RandomState(seed=0) + + def testLinearPol(self): + model = PolynomialModels.LinearPolynomialModel() + fitmodel = PolynomialModels.LinearPolynomialModel() + model.xdata = fitmodel.xdata = numpy.linspace(0, 100, 100) + + for degree in [0, 1, 5]: + with self.subTest(degree=degree): + model.degree = degree + fitmodel.degree = degree + ncoeff = degree + 1 + expected = self.random_state.uniform(low=-5, high=5, size=ncoeff) + model.coefficients = expected + actual = model.get_parameter_values() + numpy.testing.assert_array_equal(actual, expected) + + names = model.get_parameter_group_names() + expected_names = ("fitmodel_coefficients",) + self.assertEqual(names, expected_names) + + fitmodel.ydata = model.yfullmodel + numpy.testing.assert_array_equal(fitmodel.ydata, model.yfullmodel) + numpy.testing.assert_array_equal(fitmodel.yfitdata, model.yfitmodel) + numpy.testing.assert_array_equal(model.yfitmodel, model.yfullmodel) + + for parameter_types in [ + ParameterType.independent_linear, + AllParameterTypes, + ]: + with self.subTest(degree=degree, parameter_types=parameter_types): + fitmodel.parameter_types = parameter_types + fitmodel.coefficients = numpy.zeros_like(expected) + self.assertEqual(fitmodel.degree, degree) + result = fitmodel.fit()["parameters"] + numpy.testing.assert_allclose(result, expected, rtol=1e-4) + + def testExpPol(self): + model = PolynomialModels.ExponentialPolynomialModel() + fitmodel = PolynomialModels.ExponentialPolynomialModel() + model.xdata = fitmodel.xdata = numpy.linspace(-0.5, 0.5, 100) + + for degree in [0, 1, 5]: + with self.subTest(degree=degree): + model.degree = degree + fitmodel.degree = degree + ncoeff = degree + 1 + expected = self.random_state.uniform(low=-5, high=5, size=ncoeff) + model.coefficients = expected + expected[0] = numpy.log(expected[0]) + actual = model.get_parameter_values() + numpy.testing.assert_array_equal(actual, expected) + + fitmodel.ydata = model.yfullmodel + numpy.testing.assert_array_equal(fitmodel.ydata, 
model.yfullmodel) + numpy.testing.assert_allclose(fitmodel.yfitdata, model.yfitmodel) + numpy.testing.assert_allclose( + model.yfitmodel, numpy.log(model.yfullmodel) + ) + + for parameter_types in [ + ParameterType.independent_linear, + AllParameterTypes, + ]: + with self.subTest(degree=degree, parameter_types=parameter_types): + fitmodel.parameter_types = parameter_types + fitmodel.coefficients = numpy.zeros_like(expected) + if parameter_types == AllParameterTypes: + fitmodel.coefficients[0] = 0.1 + self.assertEqual(fitmodel.degree, degree) + result = fitmodel.fit()["parameters"] + numpy.testing.assert_allclose(result, expected) + + +def getSuite(auto=True): + testSuite = unittest.TestSuite() + if auto: + testSuite.addTest(unittest.TestLoader().loadTestsFromTestCase(testFitPolModel)) + else: + # use a predefined order + testSuite.addTest(testFitPolModel("testLinearPol")) + testSuite.addTest(testFitPolModel("testExpPol")) + return testSuite + + +def test(auto=False): + unittest.TextTestRunner(verbosity=2).run(getSuite(auto=auto)) + + +if __name__ == "__main__": + test() diff --git a/PyMca5/tests/FitSimpleModelTest.py b/PyMca5/tests/FitSimpleModelTest.py new file mode 100644 index 000000000..82d267564 --- /dev/null +++ b/PyMca5/tests/FitSimpleModelTest.py @@ -0,0 +1,345 @@ +import unittest +from contextlib import contextmanager +import numpy +from PyMca5.tests.SimpleModel import SimpleModel +from PyMca5.tests.SimpleModel import SimpleCombinedModel +from PyMca5.PyMcaMath.fitting.model.ParameterModel import ParameterType +from PyMca5.PyMcaMath.fitting.model.ParameterModel import AllParameterTypes + + +class testFitModel(unittest.TestCase): + def setUp(self): + self.random_state = numpy.random.RandomState(seed=100) + + def testLinearFit(self): + for _ in self._fit_model_subtests(): + self._test_fit(ParameterType.independent_linear) + + def testNonLinearFit(self): + for _ in self._fit_model_subtests(): + self._test_fit(AllParameterTypes) + + def _test_fit(self, parameter_types): + self.fitmodel.parameter_types = parameter_types + refined_params = self.fitmodel.get_parameter_values( + parameter_types=AllParameterTypes + ).copy() + lin_refined_params = self.fitmodel.get_parameter_values( + parameter_types=ParameterType.independent_linear + ).copy() + self._modify_random(parameter_types=parameter_types) + + before = self.fitmodel.get_parameter_values(parameter_types=AllParameterTypes) + lin_before = self.fitmodel.get_parameter_values( + parameter_types=ParameterType.independent_linear + ) + + # with self._profile("test"): + result = self.fitmodel.fit(full_output=True) + + # Verify the expected fit parameters + rtol = 1e-3 + if result["parameter_types"] == AllParameterTypes: + numpy.testing.assert_allclose( + result["parameters"], refined_params, rtol=rtol + ) + else: + numpy.testing.assert_allclose( + result["parameters"], lin_refined_params, rtol=rtol + ) + + # Check that the model has not been affected + after = self.fitmodel.get_parameter_values(parameter_types=AllParameterTypes) + lin_after = self.fitmodel.get_parameter_values( + parameter_types=ParameterType.independent_linear + ) + numpy.testing.assert_array_equal(before, after) + numpy.testing.assert_array_equal(lin_before, lin_after) + + # Modify the fit model + self._assert_model_not_refined(refined_params, lin_refined_params) + self.fitmodel.use_fit_result(result) + self._assert_model_refined(refined_params, lin_refined_params, rtol=rtol) + + def _assert_model_not_refined(self, refined_params, lin_refined_params): + self.assertTrue( + 
not numpy.allclose(self.fitmodel.ydata, self.fitmodel.yfullmodel)
+        )
+        parameters = self.fitmodel.get_parameter_values(
+            parameter_types=AllParameterTypes
+        )
+        self.assertTrue(not numpy.allclose(parameters, refined_params))
+        parameters = self.fitmodel.get_parameter_values(
+            parameter_types=ParameterType.independent_linear
+        )
+        self.assertTrue(not numpy.allclose(parameters, lin_refined_params))
+
+    def _assert_model_refined(self, refined_params, lin_refined_params, rtol=1e-7):
+        parameters = self.fitmodel.get_parameter_values(
+            parameter_types=AllParameterTypes
+        )
+        numpy.testing.assert_allclose(parameters, refined_params, rtol=rtol)
+        parameters = self.fitmodel.get_parameter_values(
+            parameter_types=ParameterType.independent_linear
+        )
+        numpy.testing.assert_allclose(parameters, lin_refined_params, rtol=rtol)
+        numpy.testing.assert_allclose(self.fitmodel.ydata, self.fitmodel.yfullmodel)
+
+    def _assert_fit_result(self, result, expected):
+        p = numpy.asarray(result["parameters"])
+        pstd = numpy.asarray(result["uncertainties"])
+        ll = p - 3 * pstd
+        ul = p + 3 * pstd
+        self.assertTrue(all((expected >= ll) & (expected <= ul)))
+
+    def _fit_model_subtests(self):
+        for nmodels in [1, 4]:
+            with self.subTest(nmodels=nmodels):
+                self._create_model(nmodels=nmodels)
+                self._validate_models()
+                yield
+                self._validate_models()
+
+    def _create_model(self, nmodels):
+        self.nmodels = nmodels
+        self.is_combined_model = nmodels != 1
+        if nmodels == 1:
+            self.fitmodel = SimpleModel()
+        else:
+            self.fitmodel = SimpleCombinedModel(ndetectors=nmodels)
+        self.assertEqual(self.fitmodel.parameter_types, AllParameterTypes)
+
+        self._init_random()
+
+        ydata = self.fitmodel.yfullmodel.copy()
+        self.fitmodel.ydata = ydata
+        numpy.testing.assert_array_equal(self.fitmodel.ydata, ydata)
+        numpy.testing.assert_array_equal(self.fitmodel.yfullmodel, ydata)
+        numpy.testing.assert_allclose(
+            self.fitmodel.yfitmodel, ydata - self.background, atol=1e-12
+        )
+
+    def _init_random(self, **kw):
+        self.npeaks = 13  # concentrations
+        self.nshapeparams = 4  # zero, gain, wzero, wgain
+        self.nchannels = 2048
+        self.border = 0.1  # peak positions not within this border fraction
+        self.background = 10
+        if self.is_combined_model:
+            for model in self.fitmodel.models:
+                self._init_random_model(model)
+        else:
+            self._init_random_model(self.fitmodel)
+
+    def _init_random_model(self, model):
+        nchannels = self.nchannels
+        model.xdata_raw = numpy.arange(nchannels)
+        model.ydata_raw = numpy.full(nchannels, numpy.nan)
+        model.ybkg = self.background
+        model.xmin = self.random_state.randint(low=0, high=10)
+        model.xmax = self.random_state.randint(low=nchannels - 10, high=nchannels)
+        model.zero = self.random_state.uniform(low=1, high=1.5)
+        model.gain = self.random_state.uniform(low=10e-3, high=11e-3)
+
+        # Peaks too close to the border will cause numerical checking to fail
+        a = model.zero
+        b = model.zero + model.gain * nchannels
+        border = self.border * (b - a)
+        a += border
+        b -= border
+        npeaks = self.npeaks
+        model.positions = numpy.linspace(a, b, npeaks)
+
+        model.wzero = self.random_state.uniform(low=0.0, high=0.01)
+        model.wgain = self.random_state.uniform(low=0.05, high=0.1)
+        model.concentrations = self.random_state.uniform(low=0.5, high=1, size=npeaks)
+        model.efficiency = self.random_state.uniform(low=5000, high=6000, size=npeaks)
+
+    def _modify_random(self, parameter_types):
+        self._modify_random_model(parameter_types)
+        self._validate_models()
+
+    def _modify_random_model(self, parameter_types):
+        self.assertIn(
parameter_types, (AllParameterTypes, ParameterType.independent_linear) + ) + pnonlinorg = self.fitmodel.get_parameter_values( + parameter_types=ParameterType.non_linear + ) + plinorg = self.fitmodel.get_parameter_values( + parameter_types=ParameterType.independent_linear + ) + pallorg = self.fitmodel.get_parameter_values(parameter_types=AllParameterTypes) + + if parameter_types is AllParameterTypes: + pmod = pnonlinorg.copy() + pmod *= self.random_state.uniform(0.95, 1, len(pmod)) + self.fitmodel.set_parameter_values( + pmod, parameter_types=ParameterType.non_linear + ) + parameters = self.fitmodel.get_parameter_values( + parameter_types=ParameterType.non_linear + ) + numpy.testing.assert_array_equal(parameters, pmod) + + pmod = plinorg.copy() + pmod *= self.random_state.uniform(0.5, 0.8, len(pmod)) + self.fitmodel.set_parameter_values( + pmod, parameter_types=ParameterType.independent_linear + ) + parameters = self.fitmodel.get_parameter_values( + parameter_types=ParameterType.independent_linear + ) + numpy.testing.assert_array_equal(parameters, pmod) + + pall = self.fitmodel.get_parameter_values(parameter_types=AllParameterTypes) + for group in self.fitmodel.get_parameter_groups( + parameter_types=AllParameterTypes + ): + current = pall[group.index] + expected = pallorg[group.index] + if parameter_types is AllParameterTypes or group.is_independent_linear: + # Values are expected to be modified + if group.count == 1: + self.assertNotEqual(current, expected, msg=group.name) + else: + self.assertFalse(all(current == expected), msg=group.name) + else: + # Values are not expected to be modified + if group.count == 1: + self.assertEqual(current, expected, msg=group.name) + else: + self.assertTrue(all(current == expected), msg=group.name) + + def _validate_models(self): + self._validate_model(self.fitmodel) + if self.is_combined_model: + for model_idx, model in enumerate(self.fitmodel.models): + self._validate_model(model, model_idx) + self._validate_model(self.fitmodel) + + def _validate_model(self, model, model_idx=None): + is_combined_model = self.is_combined_model and model_idx is None + original_parameters = model.get_parameter_values( + parameter_types=AllParameterTypes + ).copy() + original_linear_parameters = model.get_parameter_values( + parameter_types=ParameterType.independent_linear + ).copy() + + nonlin_expected = {"gain", "wgain", "wzero", "zero"} + if self.is_combined_model: + if is_combined_model: + nonlin_expected = { + f"detector{model_idx}:{name}" + for model_idx in range(self.nmodels) + for name in nonlin_expected + } + else: + nonlin_expected = { + f"detector{model_idx}:{name}" for name in nonlin_expected + } + lin_expected = {"concentrations"} + all_expected = lin_expected | nonlin_expected + names = model.get_parameter_group_names(parameter_types=AllParameterTypes) + self.assertEqual(set(names), all_expected) + names = model.get_parameter_group_names( + parameter_types=ParameterType.independent_linear + ) + self.assertEqual(set(names), lin_expected) + + lin_expected = {f"concentrations{i}" for i in range(self.npeaks)} + all_expected = lin_expected | nonlin_expected + names = model.get_parameter_names(parameter_types=AllParameterTypes) + self.assertEqual(set(names), all_expected) + names = model.get_parameter_names( + parameter_types=ParameterType.independent_linear + ) + self.assertEqual(set(names), lin_expected) + + n = model.ndata + nexpected = len(model.xdata) + self.assertEqual(n, nexpected) + + nexpected = 
len(model.get_parameter_values(parameter_types=AllParameterTypes)) + n = model.get_n_parameters(parameter_types=AllParameterTypes) + self.assertEqual(n, nexpected) + + nexpected = len( + model.get_parameter_values(parameter_types=ParameterType.independent_linear) + ) + n = model.get_n_parameters(parameter_types=ParameterType.independent_linear) + self.assertEqual(n, nexpected) + + arr1 = model.evaluate_fullmodel() + arr2 = model.evaluate_decomposed_fullmodel() + arr3 = model.yfullmodel + numpy.testing.assert_allclose(arr1, arr2) + numpy.testing.assert_allclose(arr1, arr3) + + arr1 = model.evaluate_fitmodel() + arr2 = model.evaluate_decomposed_fitmodel() + arr3 = model.yfitmodel + arr4 = sum(model.linear_decomposition_fitmodel()) + numpy.testing.assert_allclose(arr1, arr2) + numpy.testing.assert_allclose(arr1, arr3) + numpy.testing.assert_allclose(arr1, arr4) + + if is_combined_model: + nmodels = model.nmodels + nexpected = self.npeaks + self.nshapeparams * nmodels + else: + nexpected = self.npeaks + self.nshapeparams + n = model.get_n_parameters(parameter_types=AllParameterTypes) + self.assertEqual(n, nexpected) + n = model.get_n_parameters(parameter_types=ParameterType.independent_linear) + self.assertEqual(n, self.npeaks) + + rtol = 1e-3 + for parameter_types in (ParameterType.independent_linear, AllParameterTypes): + with model.parameter_types_context(parameter_types): + noncached = model.compare_derivatives() + cached = model.compare_derivatives() + err_fmt = "[parameter_types={}] {{}} and {{}} derivative of '{{}}' (model: {}) are not equal".format( + parameter_types, model_idx + ) + for deriv, deriv_cached in zip(noncached, cached): + param_name, calc, numerical = deriv + param_name_cached, calc_cached, numerical_cached = deriv_cached + err_msg = err_fmt.format("analytical", "numerical", param_name) + self.assertEqual(param_name, param_name_cached) + numpy.testing.assert_allclose( + calc, numerical, err_msg=err_msg, rtol=rtol + ) + err_msg = err_fmt.format( + "cached analytical", "numerical", param_name_cached + ) + numpy.testing.assert_allclose( + calc_cached, numerical_cached, err_msg=err_msg, rtol=rtol + ) + err_msg = err_fmt.format("cached", "non-cached ", param_name) + numpy.testing.assert_allclose( + calc, calc_cached, err_msg=err_msg, rtol=rtol + ) + + parameters = model.get_parameter_values(parameter_types=AllParameterTypes) + numpy.testing.assert_array_equal(original_parameters, parameters) + parameters = model.get_parameter_values( + parameter_types=ParameterType.independent_linear + ) + numpy.testing.assert_array_equal(original_linear_parameters, parameters) + + def _vis_compare(self, a, b): + import matplotlib.pyplot as plt + + plt.plot(a) + plt.plot(b) + plt.show() + + @contextmanager + def _profile(self, name): + from PyMca5.PyMcaMisc.ProfilingUtils import profile + + filename = f"{name}.pyprof" + with profile(memory=False, filename=filename): + yield diff --git a/PyMca5/tests/LinkedModelTest.py b/PyMca5/tests/LinkedModelTest.py new file mode 100644 index 000000000..40db322ef --- /dev/null +++ b/PyMca5/tests/LinkedModelTest.py @@ -0,0 +1,233 @@ +import unittest +from collections import Counter +from PyMca5.PyMcaMath.fitting.model.LinkedModel import LinkedModel +from PyMca5.PyMcaMath.fitting.model.LinkedModel import LinkedModelManager +from PyMca5.PyMcaMath.fitting.model.LinkedModel import linked_contextmanager +from PyMca5.PyMcaMath.fitting.model.LinkedModel import linked_property + + +class ModelBase(LinkedModel): + def __init__(self): + super().__init__() + 
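# Counts how many times the linked context manager is entered.
+        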
self.context_counter = 0 + + @linked_contextmanager + def context(self): + self.context_counter += 1 + yield + + def fit(self): + with self.context(): + pass + + +class Model1(ModelBase): + def __init__(self, cfg): + super().__init__() + self._cfg = cfg + self.reset_counters() + + def reset_counters(self): + self.get_counter = Counter() + self.set_counter = Counter() + + @linked_property + def var1(self): + self.get_counter["var1"] += 1 + return self._cfg.get("var1") + + @var1.setter + def var1(self, value): + self.set_counter["var1"] += 1 + self._cfg["var1"] = value + + +class Model2(Model1): + @linked_property + def var2(self): + self.get_counter["var2"] += 1 + return self._cfg.get("var2") + + @var2.setter + def var2(self, value): + self.set_counter["var2"] += 1 + self._var2 = value + self._cfg["var2"] = value + + +class ConcatModel(LinkedModelManager, ModelBase): + def __init__(self): + cfg1a = {"var1": 1} + cfg1b = {"var1": 2} + cfg2a = {"var1": 3, "var2": 4} + cfg2b = {"var1": 5, "var2": 6} + instances = { + 0: Model1(cfg1a), + 1: Model1(cfg1b), + 2: Model2(cfg2a), + 3: Model2(cfg2b), + } + super().__init__(instances) + + def __getitem__(self, index): + return self._linked_instance_mapping[index] + + def reset_counters(self): + for model in self._linked_instances: + model.reset_counters() + + def link(self): + self._enable_property_link("var1", "var2") + self.reset_counters() + + def unlink(self): + self._disable_property_link("var1", "var2") + self.reset_counters() + + +class testLinkedModel(unittest.TestCase): + def setUp(self): + self.concat_model = ConcatModel() + + def test_links(self): + """establish links""" + nlinked = len(self.concat_model._linked_instances) - 1 + for model in self.concat_model._linked_instances: + self.assertEqual(len(list(model._linked_instances)), nlinked) + + def test_init_properties(self): + """initial property values""" + self.assert_property_values("var1", 1, [1, 2, 3, 5]) + self.assert_property_values("var2", 4, [4, 6]) + + def test_enable_property_link_syncing(self): + """at most one get/set of a linked property when enabling linking""" + for i, model in enumerate(self.concat_model._linked_instances): + model.var1 = 100 + i + if model._has_linked_property("var2"): + model.var2 = 200 + i + self.assert_property_values("var1", 100, [100, 101, 102, 103]) + self.assert_property_values("var2", 202, [202, 203]) + self.concat_model.reset_counters() + + self.concat_model._enable_property_link("var1") + getmodel = self.concat_model._instance_with_linked_property("var1") + for model in self.concat_model._linked_instances: + if model is getmodel: + self.assertEqual(model.get_counter["var1"], 1) + self.assertTrue(model.set_counter["var1"] <= 1) + else: + self.assertEqual(model.get_counter["var1"], 0) + self.assertEqual(model.set_counter["var1"], 1) + self.assertEqual(model.get_counter["var2"], 0) + self.assertEqual(model.set_counter["var2"], 0) + self.assert_property_values("var1", 100) + self.assert_property_values("var2", 202, [202, 203]) + + def test_contexts_concat(self): + """entering a linked context manager of a container""" + self.concat_model.fit() + self.assertEqual(self.concat_model.context_counter, 1) + for model in self.concat_model._linked_instances: + self.assertEqual(model.context_counter, 1) + + def test_contexts_single(self): + """entering a linked context manager of a single instance""" + model = self.concat_model[2] + model.fit() + self.assertEqual(model.context_counter, 1) + for model in self.concat_model._linked_instances: + self.assertEqual(model.context_counter, 1) + 
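+ # Behaviour asserted by the two tests above (an inference from this test
+ # file, not a documented guarantee): entering a linked context manager,
+ # through either the manager or any single instance, enters the context
+ # exactly once on every linked instance.
+ def 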
test_get_var1_concat(self): + """getting a linked property (present in all models) from a container""" + self.concat_model.link() + self.assertEqual(self.concat_model._get_linked_property_value("var1"), 1) + for i, model in enumerate(self.concat_model._linked_instances): + self.assertEqual( + model.get_counter["var1"], 1 if i == 0 else 0, msg=f"model{i}" + ) + self.assertEqual(model.set_counter["var1"], 0, msg=f"model{i}") + + def test_get_var1_single(self): + """getting a linked property (present in all models) from a single instance""" + self.concat_model.link() + model = self.concat_model[2] + self.assertEqual(model.var1, 1) + for i, model in enumerate(self.concat_model._linked_instances): + self.assertEqual( + model.get_counter["var1"], 1 if i == 2 else 0, msg=f"model{i}" + ) + self.assertEqual(model.set_counter["var1"], 0, msg=f"model{i}") + + def test_get_var2_concat(self): + """getting a linked property (present in some models) from a container""" + self.concat_model.link() + self.assertEqual(self.concat_model._get_linked_property_value("var2"), 4) + for i, model in enumerate(self.concat_model._linked_instances): + self.assertEqual( + model.get_counter["var2"], 1 if i == 2 else 0, msg=f"model{i}" + ) + self.assertEqual(model.set_counter["var2"], 0, msg=f"model{i}") + + def test_get_var2_single(self): + """getting a linked property (present in some models) from a single instance""" + self.concat_model.link() + model = self.concat_model[2] + self.assertEqual(model.var2, 4) + for i, model in enumerate(self.concat_model._linked_instances): + self.assertEqual( + model.get_counter["var2"], 1 if i == 2 else 0, msg=f"model{i}" + ) + self.assertEqual(model.set_counter["var2"], 0, msg=f"model{i}") + + def test_set_var1_concat(self): + """setting a linked property (present in all models) from a container""" + self.concat_model.link() + self.concat_model._set_linked_property_value("var1", 100) + for i, model in enumerate(self.concat_model._linked_instances): + self.assertEqual(model.get_counter["var1"], 0, msg=f"model{i}") + self.assertEqual(model.set_counter["var1"], 1, msg=f"model{i}") + self.assert_property_values("var1", 100) + + def test_set_var1_single(self): + """setting a linked property (present in all models) from a single instance""" + self.concat_model.link() + model = self.concat_model[2] + model.var1 = 1 + for i, model in enumerate(self.concat_model._linked_instances): + self.assertEqual(model.get_counter["var1"], 0, msg=f"model{i}") + self.assertEqual(model.set_counter["var1"], 1, msg=f"model{i}") + + def test_set_var2_concat(self): + """setting a linked property (present in some models) from a container""" + self.concat_model.link() + self.concat_model._set_linked_property_value("var2", 100) + for i, model in enumerate(self.concat_model._linked_instances): + self.assertEqual(model.get_counter["var2"], 0, msg=f"model{i}") + self.assertEqual( + model.set_counter["var2"], 1 if i > 1 else 0, msg=f"model{i}" + ) + self.assert_property_values("var2", 100) + + def test_set_var2_single(self): + """setting a linked property (present in some models) from a single instance""" + self.concat_model.link() + model = self.concat_model[2] + model.var2 = 100 + for i, model in enumerate(self.concat_model._linked_instances): + self.assertEqual(model.get_counter["var2"], 0, msg=f"model{i}") + self.assertEqual( + model.set_counter["var2"], 1 if i > 1 else 0, msg=f"model{i}" + ) + + def assert_property_values(self, name, value, values=None): + self.assertEqual( + 
self.concat_model._get_linked_property_value(name), value, msg=name + ) + if not isinstance(values, list): + values = [value] * len(self.concat_model._linked_instances) + for model, v in zip( + self.concat_model._instances_with_linked_property(name), values + ): + self.assertEqual(getattr(model, name), v, msg=name)
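+# Illustrative usage sketch, not an actual test: once a property is linked,
+# setting it on any single instance propagates to every linked instance,
+# which is what the set_counter assertions above verify.
+#
+#     concat = ConcatModel()
+#     concat.link()
+#     concat[0].var1 = 7
+#     assert all(m.var1 == 7 for m in concat._linked_instances)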
"var3_nonlin": 402, + } + models = { + "model0": Model1(cfg1a), + "model1": Model1(cfg1b), + "model2": Model2(cfg2a), + "model3": Model2(cfg2b), + } + super().__init__(models) + self._enable_property_link("var1_lin", "var1_nonlin", "var2_lin", "var2_nonlin") + + def __getitem__(self, index): + while index < 0: + index += len(self._linked_instance_mapping) + return self._linked_instance_mapping[f"model{index}"] + + +class testParameterModel(unittest.TestCase): + def setUp(self): + self.concat_model = ConcatModel() + + def test_instantiation(self): + self.assertEqual(self.concat_model.nmodels, 4) + self.assertEqual(self.concat_model.parameter_types, AllParameterTypes) + for model in self.concat_model.models: + self.assertEqual(model.parameter_types, AllParameterTypes) + + def test_parameter_type(self): + for parameter_types in ParameterType: + for group in self.concat_model.get_parameter_groups( + parameter_types=parameter_types + ): + self.assertEqual(group.type, parameter_types) + types = set(group.type for group in self.concat_model.get_parameter_groups()) + expected = {ParameterType.independent_linear, ParameterType.non_linear} + self.assertEqual(types, expected) + + def test_parameter_type_context(self): + with self.concat_model.parameter_types_context( + ParameterType.independent_linear + ): + self.assertEqual( + self.concat_model.parameter_types, ParameterType.independent_linear + ) + for model in self.concat_model.models: + self.assertTrue(model.parameter_types, ParameterType.independent_linear) + + self.assertEqual(self.concat_model.parameter_types, AllParameterTypes) + for model in self.concat_model.models: + self.assertEqual(model.parameter_types, AllParameterTypes) + + def test_parameter_group_names(self): + for cacheoptions in self._parameterize_nonlinear_test(): + names = self.concat_model[0].get_parameter_group_names(**cacheoptions) + expected = ( + "var1_lin", + "var1_nonlin", + "model0:var3_lin", + "model0:var3_nonlin", + ) + self.assertEqual(tuple(names), expected) + + names = self.concat_model[-1].get_parameter_group_names(**cacheoptions) + expected = ( + "var1_lin", + "var1_nonlin", + "var2_lin", + "var2_nonlin", + "model3:var3_lin", + "model3:var3_nonlin", + ) + self.assertEqual(tuple(names), expected) + + names = self.concat_model.get_parameter_group_names(**cacheoptions) + expected = ( + "var1_lin", + "var1_nonlin", + "var2_lin", + "var2_nonlin", + "model0:var3_lin", + "model0:var3_nonlin", + "model1:var3_lin", + "model1:var3_nonlin", + "model2:var3_lin", + "model2:var3_nonlin", + "model3:var3_lin", + "model3:var3_nonlin", + ) + self.assertEqual(tuple(names), expected) + + with self._unlink_var1_lin() as unlinked: + if unlinked: + names = self.concat_model.get_parameter_group_names(**cacheoptions) + expected = ( + "var1_nonlin", + "var2_lin", + "var2_nonlin", + "model0:var1_lin", + "model0:var3_lin", + "model0:var3_nonlin", + "model1:var1_lin", + "model1:var3_lin", + "model1:var3_nonlin", + "model2:var1_lin", + "model2:var3_lin", + "model2:var3_nonlin", + "model3:var1_lin", + "model3:var3_lin", + "model3:var3_nonlin", + ) + self.assertEqual(tuple(names), expected) + + def test_independent_linear_parameter_group_names(self): + for cacheoptions in self._parameterize_linear_test(): + names = self.concat_model[0].get_parameter_group_names(**cacheoptions) + expected = ("var1_lin", "model0:var3_lin") + self.assertEqual(tuple(names), expected) + + names = self.concat_model[-1].get_parameter_group_names(**cacheoptions) + expected = ("var1_lin", "var2_lin", "model3:var3_lin") + 
self.assertEqual(tuple(names), expected) + + names = self.concat_model.get_parameter_group_names(**cacheoptions) + expected = ( + "var1_lin", + "var2_lin", + "model0:var3_lin", + "model1:var3_lin", + "model2:var3_lin", + "model3:var3_lin", + ) + self.assertEqual(tuple(names), expected) + + with self._unlink_var1_lin() as unlinked: + if unlinked: + names = self.concat_model.get_parameter_group_names(**cacheoptions) + expected = ( + "var2_lin", + "model0:var1_lin", + "model0:var3_lin", + "model1:var1_lin", + "model1:var3_lin", + "model2:var1_lin", + "model2:var3_lin", + "model3:var1_lin", + "model3:var3_lin", + ) + self.assertEqual(tuple(names), expected) + + def test_parameter_names(self): + for cacheoptions in self._parameterize_nonlinear_test(): + names = self.concat_model[0].get_parameter_names(**cacheoptions) + expected = ( + "var1_lin0", + "var1_lin1", + "var1_nonlin", + "model0:var3_lin", + "model0:var3_nonlin", + ) + self.assertEqual(tuple(names), expected) + + names = self.concat_model[-1].get_parameter_names(**cacheoptions) + expected = ( + "var1_lin0", + "var1_lin1", + "var1_nonlin", + "var2_lin", + "var2_nonlin", + "model3:var3_lin", + "model3:var3_nonlin", + ) + self.assertEqual(tuple(names), expected) + + names = self.concat_model.get_parameter_names(**cacheoptions) + expected = ( + "var1_lin0", + "var1_lin1", + "var1_nonlin", + "var2_lin", + "var2_nonlin", + "model0:var3_lin", + "model0:var3_nonlin", + "model1:var3_lin", + "model1:var3_nonlin", + "model2:var3_lin", + "model2:var3_nonlin", + "model3:var3_lin", + "model3:var3_nonlin", + ) + self.assertEqual(tuple(names), expected) + + with self._unlink_var1_lin() as unlinked: + if unlinked: + names = self.concat_model.get_parameter_names(**cacheoptions) + expected = ( + "var1_nonlin", + "var2_lin", + "var2_nonlin", + "model0:var1_lin0", + "model0:var1_lin1", + "model0:var3_lin", + "model0:var3_nonlin", + "model1:var1_lin0", + "model1:var1_lin1", + "model1:var3_lin", + "model1:var3_nonlin", + "model2:var1_lin0", + "model2:var1_lin1", + "model2:var3_lin", + "model2:var3_nonlin", + "model3:var1_lin0", + "model3:var1_lin1", + "model3:var3_lin", + "model3:var3_nonlin", + ) + self.assertEqual(tuple(names), expected) + + def test_linear_parameter_names(self): + for cacheoptions in self._parameterize_linear_test(): + names = self.concat_model[0].get_parameter_names(**cacheoptions) + expected = ("var1_lin0", "var1_lin1", "model0:var3_lin") + self.assertEqual(tuple(names), expected) + + names = self.concat_model[-1].get_parameter_names(**cacheoptions) + expected = ("var1_lin0", "var1_lin1", "var2_lin", "model3:var3_lin") + self.assertEqual(tuple(names), expected) + + names = self.concat_model.get_parameter_names(**cacheoptions) + expected = ( + "var1_lin0", + "var1_lin1", + "var2_lin", + "model0:var3_lin", + "model1:var3_lin", + "model2:var3_lin", + "model3:var3_lin", + ) + self.assertEqual(tuple(names), expected) + + with self._unlink_var1_lin() as unlinked: + if unlinked: + names = self.concat_model.get_parameter_names(**cacheoptions) + expected = ( + "var2_lin", + "model0:var1_lin0", + "model0:var1_lin1", + "model0:var3_lin", + "model1:var1_lin0", + "model1:var1_lin1", + "model1:var3_lin", + "model2:var1_lin0", + "model2:var1_lin1", + "model2:var3_lin", + "model3:var1_lin0", + "model3:var1_lin1", + "model3:var3_lin", + ) + self.assertEqual(tuple(names), expected) + + def test_n_parameter(self): + for cacheoptions in self._parameterize_nonlinear_test(): + n = self.concat_model[0].get_n_parameters(**cacheoptions) + self.assertEqual(n, 5) 
+ + n = self.concat_model[-1].get_n_parameters(**cacheoptions) + self.assertEqual(n, 7) + + n = self.concat_model.get_n_parameters(**cacheoptions) + self.assertEqual(n, 13) + + with self._unlink_var1_lin() as unlinked: + if unlinked: + n = self.concat_model.get_n_parameters(**cacheoptions) + self.assertEqual(n, 19) + + def test_n_linear_parameter(self): + for cacheoptions in self._parameterize_linear_test(): + n = self.concat_model[0].get_n_parameters(**cacheoptions) + self.assertEqual(n, 3) + + n = self.concat_model[-1].get_n_parameters(**cacheoptions) + self.assertEqual(n, 4) + + n = self.concat_model.get_n_parameters(**cacheoptions) + self.assertEqual(n, 7) + + with self._unlink_var1_lin() as unlinked: + if unlinked: + n = self.concat_model.get_n_parameters(**cacheoptions) + self.assertEqual(n, 13) + + def test_parameter_constraints(self): + for cacheoptions in self._parameterize_nonlinear_test(): + arr = self.concat_model[0].get_parameter_constraints(**cacheoptions) + self.assertEqual(arr.shape, (5, 3)) + + arr = self.concat_model[-1].get_parameter_constraints(**cacheoptions) + self.assertEqual(arr.shape, (7, 3)) + + arr = self.concat_model.get_parameter_constraints(**cacheoptions) + self.assertEqual(arr.shape, (13, 3)) + + with self._unlink_var1_lin() as unlinked: + if unlinked: + arr = self.concat_model.get_parameter_constraints(**cacheoptions) + self.assertEqual(arr.shape, (19, 3)) + + def test_linear_parameter_constraints(self): + for cacheoptions in self._parameterize_linear_test(): + if not self._cached: + continue + arr = self.concat_model[0].get_parameter_constraints(**cacheoptions) + self.assertEqual(arr.shape, (3, 3)) + + arr = self.concat_model[-1].get_parameter_constraints(**cacheoptions) + self.assertEqual(arr.shape, (4, 3)) + + arr = self.concat_model.get_parameter_constraints(**cacheoptions) + self.assertEqual(arr.shape, (7, 3)) + + with self._unlink_var1_lin() as unlinked: + if unlinked: + arr = self.concat_model.get_parameter_constraints(**cacheoptions) + self.assertEqual(arr.shape, (13, 3)) + + def test_get_parameter_values(self): + for cacheoptions in self._parameterize_nonlinear_test(): + values = self.concat_model[0].get_parameter_values(**cacheoptions) + self.assertEqual(values.tolist(), [11, 11, 12, 101, 102]) + + values = self.concat_model[-1].get_parameter_values(**cacheoptions) + self.assertEqual(values.tolist(), [11, 11, 12, 41, 42, 401, 402]) + + values = self.concat_model.get_parameter_values(**cacheoptions) + expected = [11, 11, 12, 41, 42, 101, 102, 201, 202, 301, 302, 401, 402] + self.assertEqual(values.tolist(), expected) + + with self._unlink_var1_lin() as unlinked: + if unlinked: + values = self.concat_model.get_parameter_values(**cacheoptions) + expected = [ + 12, + 41, + 42, + 11, + 11, + 101, + 102, + 11, + 11, + 201, + 202, + 11, + 11, + 301, + 302, + 11, + 11, + 401, + 402, + ] + self.assertEqual(values.tolist(), expected) + + def test_parameter_values(self): + for cacheoptions in self._parameterize_nonlinear_test(): + self._assert_set_get_parameter_values( + self.concat_model[0], [11, 11, 12, 101, 102], **cacheoptions + ) + self._assert_set_get_parameter_values( + self.concat_model[-1], [11, 11, 12, 41, 42, 401, 402], **cacheoptions + ) + expected = [11, 11, 12, 41, 42, 101, 102, 201, 202, 301, 302, 401, 402] + self._assert_set_get_parameter_values( + self.concat_model, expected, **cacheoptions + ) + with self._unlink_var1_lin() as unlinked: + if unlinked: + expected = [ + 12, + 41, + 42, + 11, + 11, + 101, + 102, + 11, + 11, + 201, + 202, + 11, 
+ 11, + 301, + 302, + 11, + 11, + 401, + 402, + ] + self._assert_set_get_parameter_values( + self.concat_model, expected, **cacheoptions + ) + + def test_linear_parameter_values(self): + for cacheoptions in self._parameterize_linear_test(): + self._assert_set_get_parameter_values( + self.concat_model[0], [11, 11, 101], **cacheoptions + ) + self._assert_set_get_parameter_values( + self.concat_model[-1], [11, 11, 41, 401], **cacheoptions + ) + self._assert_set_get_parameter_values( + self.concat_model, [11, 11, 41, 101, 201, 301, 401], **cacheoptions + ) + with self._unlink_var1_lin() as unlinked: + if unlinked: + expected = [ + 41, + 11, + 11, + 101, + 11, + 11, + 201, + 11, + 11, + 301, + 11, + 11, + 401, + ] + self._assert_set_get_parameter_values( + self.concat_model, expected, **cacheoptions + ) + + def test_parameter_property_values(self): + for cacheoptions in self._parameterize_nonlinear_test(): + self._assertValuesEqual(self.concat_model[0].var1_lin, [11, 11]) + self._assertValuesEqual(self.concat_model[0].var1_nonlin, 12) + self._assertValuesEqual(self.concat_model[0].var3_lin, 101) + self._assertValuesEqual(self.concat_model[0].var3_nonlin, 102) + + self._assertValuesEqual(self.concat_model[-1].var1_lin, [11, 11]) + self._assertValuesEqual(self.concat_model[-1].var1_nonlin, 12) + self._assertValuesEqual(self.concat_model[-1].var2_lin, 41) + self._assertValuesEqual(self.concat_model[-1].var2_nonlin, 42) + self._assertValuesEqual(self.concat_model[-1].var3_lin, 401) + self._assertValuesEqual(self.concat_model[-1].var3_nonlin, 402) + + def _assert_set_get_parameter_values(self, model, expected, **cacheoptions): + self._assert_get_parameter_values(model, expected, **cacheoptions) + with self._protect_parameter_values(**cacheoptions): + expected2 = list(range(len(expected))) + model.set_parameter_values(numpy.array(expected2), **cacheoptions) + self._assert_get_parameter_values(model, expected2, **cacheoptions) + self._assert_get_parameter_values(model, expected, **cacheoptions) + + def _assert_get_parameter_values(self, model, expected, **cacheoptions): + values = model.get_parameter_values(**cacheoptions) + self._assertValuesEqual(values, expected) + + def _assertValuesEqual(self, values, expected): + if isinstance(expected, list): + self.assertEqual(numpy.asarray(values).tolist(), expected) + else: + self.assertEqual(values, expected) + + def _parameterize_linear_test(self): + yield from self._parameterize_tests( + [ + [None, ParameterType.independent_linear], + [ParameterType.independent_linear, AllParameterTypes], + ] + ) + + def _parameterize_nonlinear_test(self): + yield from self._parameterize_tests( + [ + [None, AllParameterTypes], + [AllParameterTypes, ParameterType.independent_linear], + ] + ) + + def _parameterize_tests(self, types): + for local_type, global_type in types: + for cached in [False, True]: + with self.subTest( + local_type=local_type, + global_type=global_type, + cached=cached, + ): + self.concat_model.parameter_types = global_type + cacheoptions = {"parameter_types": local_type} + self._cached = cached + if cached: + with self.concat_model._propertyCachingContext(**cacheoptions): + yield cacheoptions + else: + yield cacheoptions + + @contextmanager + def _unlink_var1_lin(self): + if self._cached: + yield False + else: + self.concat_model._disable_property_link("var1_lin") + try: + yield True + finally: + self.concat_model._enable_property_link("var1_lin") + + @contextmanager + def _protect_parameter_values(self, **cacheoptions): + values = 
self.concat_model.get_parameter_values(**cacheoptions).copy() + try: + yield + finally: + self.concat_model.set_parameter_values(values, **cacheoptions)
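+# Note on the naming convention exercised by the tests above (an inference
+# from the expected tuples, not an API guarantee): a linked group appears
+# once under its bare name ("var1_lin"), an unlinked group is prefixed with
+# its model key ("model0:var3_lin"), and a group whose counter is larger
+# than 1 expands into indexed parameter names ("var1_lin0", "var1_lin1").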
diff --git a/PyMca5/tests/SimpleModel.py b/PyMca5/tests/SimpleModel.py new file mode 100644 index 000000000..05a697d3a --- /dev/null +++ b/PyMca5/tests/SimpleModel.py @@ -0,0 +1,221 @@ +import numpy +from PyMca5.PyMcaMath.fitting import SpecfitFuns +from PyMca5.PyMcaMath.fitting.model import nonlinear_parameter_group +from PyMca5.PyMcaMath.fitting.model import independent_linear_parameter_group +from PyMca5.PyMcaMath.fitting.model import LeastSquaresFitModel +from PyMca5.PyMcaMath.fitting.model import LeastSquaresCombinedFitModel +from PyMca5.PyMcaMath.fitting.model.LinkedModel import linked_property +from PyMca5.PyMcaMath.fitting.model.ParameterModel import AllParameterTypes + + +class SimpleModel(LeastSquaresFitModel): + """Simplified MCA model with a fixed list of peak positions and efficiencies""" + + SIGMA_TO_FWHM = 2 * numpy.sqrt(2 * numpy.log(2)) + + def __init__(self): + self.config = { + "detector": {"zero": 0.0, "gain": 1.0, "wzero": 0.0, "wgain": 1.0}, + "matrix": {"positions": [], "concentrations": [], "efficiency": []}, + "fit": {"parameter_types": AllParameterTypes}, + "xmin": 0.0, + "xmax": 1.0, + } + self.xdata_raw = None + self.ydata_raw = None + self.ystd_raw = None + self.ybkg = 0 + super().__init__() + + def __str__(self): + return "{}(npeaks={}, zero={}, gain={}, wzero={}, wgain={})".format( + self.__class__, self.npeaks, self.zero, self.gain, self.wzero, self.wgain + ) + + @nonlinear_parameter_group + def zero(self): + return self.config["detector"]["zero"] + + @zero.setter + def zero(self, value): + self.config["detector"]["zero"] = value + + @nonlinear_parameter_group + def gain(self): + return self.config["detector"]["gain"] + + @gain.setter + def gain(self, value): + self.config["detector"]["gain"] = value + + @nonlinear_parameter_group + def wzero(self): + return self.config["detector"]["wzero"] + + @wzero.setter + def wzero(self, value): + self.config["detector"]["wzero"] = value + + @nonlinear_parameter_group + def wgain(self): + return self.config["detector"]["wgain"] + + @wgain.setter + def wgain(self, value): + self.config["detector"]["wgain"] = value + + @property + def efficiency(self): + return self.config["matrix"]["efficiency"] + + @efficiency.setter + def efficiency(self, value): + self.config["matrix"]["efficiency"] = value + + @linked_property + def positions(self): + return self.config["matrix"]["positions"] + + @positions.setter + def positions(self, value): + self.config["matrix"]["positions"] = value + + @property + def fwhms(self): + return self.wzero + self.wgain * self.positions + + @property + def areas(self): + return self.efficiency * self.concentrations + + @independent_linear_parameter_group + def concentrations(self): + return self.config["matrix"]["concentrations"] + + @concentrations.setter + def concentrations(self, value): + self.config["matrix"]["concentrations"] = value + + @linked_property + def parameter_types(self): + return self.config["fit"]["parameter_types"] + + @parameter_types.setter + def parameter_types(self, value): + self.config["fit"]["parameter_types"] = value + + @property + def idx_channels(self): + return slice(self.xmin, self.xmax) + + @property + def xdata(self): + if self.xdata_raw is None: + return None + else: + return self.xdata_raw[self.idx_channels] + + @xdata.setter + def xdata(self, values): + self.xdata_raw[self.idx_channels] = values + + @property + def xenergy(self): + return self.zero + self.gain * self.xdata + + def _y_full_to_fit(self, y, xdata=None): + return y - self.ybkg + + def _y_fit_to_full(self, y, xdata=None): + return y + self.ybkg + + @property + def ydata(self): + if self.ydata_raw is None: + return None + else: + return self.ydata_raw[self.idx_channels] + + @ydata.setter + def ydata(self, values): + self.ydata_raw[self.idx_channels] = values + + @property + def ystd(self): + if self.ystd_raw is None: + return None + else: + return self.ystd_raw[self.idx_channels] + + @ystd.setter + def ystd(self, values): + self.ystd_raw[self.idx_channels] = values + + @property + def ndata(self): + return self.xmax - self.xmin + + @property + def npeaks(self): + return len(self.concentrations) + + def evaluate_fitmodel(self, xdata=None): + """Evaluate model + + :param array xdata: shape (ndata,) + :returns array: shape (ndata,) + """ + if xdata is None: + xdata = self.xdata + x = self.zero + self.gain * xdata + p = list(zip(self.areas, self.positions, self.fwhms)) + return SpecfitFuns.agauss(p, x) + + def derivative_fitmodel(self, param_idx, xdata=None, **paramtype): + """Derivative of the fit model with respect to a single parameter + + :param int param_idx: + :param array xdata: shape (ndata,) + :returns array: shape (ndata,) + """ + if xdata is None: + xdata = self.xdata + x = self.zero + self.gain * xdata + + group = self._group_from_parameter_index(param_idx, **paramtype) + name = group.property_name + if name == "concentrations": + i = group.parameter_index_in_group(param_idx) + p = self.positions[i] + a = self.efficiency[i] + w = self.wzero + self.wgain * p + return SpecfitFuns.agauss([a, p, w], x) + + fwhms = self.fwhms + sigmas = fwhms / self.SIGMA_TO_FWHM + y = x * 0.0 + for p, a, w, s in zip(self.positions, self.areas, fwhms, sigmas): + if name in ("zero", "gain"): + # Derivative to position + m = -(x - p) / s ** 2 + # Derivative to position param + if name == "gain": + m *= xdata + elif name in ("wzero", "wgain"): + # Derivative to FWHM + m = ((x - p) ** 2 / s ** 2 - 1) / (self.SIGMA_TO_FWHM * s) + # Derivative to FWHM param + if name == "wgain": + m *= p + else: + raise ValueError(name) + y += m * SpecfitFuns.agauss([a, p, w], x) + return y + + +class SimpleCombinedModel(LeastSquaresCombinedFitModel): + def __init__(self, ndetectors=1): + models = {f"detector{i}": SimpleModel() for i in range(ndetectors)} + super().__init__(models) + self._enable_property_link("concentrations", "positions")
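+# Illustrative, self-contained check of the chain rule used in
+# derivative_fitmodel above (not part of the test suite). The
+# area-normalized Gaussian below is an assumption about what
+# SpecfitFuns.agauss computes; only plain numpy is used here.
+if __name__ == "__main__":
+    def _agauss(a, p, w, x):
+        # Gaussian with area a, position p and FWHM w
+        s = w / SimpleModel.SIGMA_TO_FWHM
+        return a * numpy.exp(-0.5 * ((x - p) / s) ** 2) / (s * numpy.sqrt(2 * numpy.pi))
+
+    x = numpy.linspace(0.0, 20.0, 201)
+    a, p, w = 100.0, 10.0, 2.0
+    s = w / SimpleModel.SIGMA_TO_FWHM
+    # analytical derivative with respect to the peak position p
+    danalytic = (x - p) / s ** 2 * _agauss(a, p, w, x)
+    # central finite difference for comparison
+    eps = 1e-6
+    dnumeric = (_agauss(a, p + eps, w, x) - _agauss(a, p - eps, w, x)) / (2 * eps)
+    numpy.testing.assert_allclose(danalytic, dnumeric, rtol=1e-4, atol=1e-6)
+    print("position derivative OK")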
diff --git a/PyMca5/tests/XrfTest.py b/PyMca5/tests/XrfTest.py index 2008d3f66..5636aa4c3 100644 --- a/PyMca5/tests/XrfTest.py +++ b/PyMca5/tests/XrfTest.py @@ -33,6 +33,7 @@ import unittest import os import sys +import copy import numpy if sys.version_info < (3,): from StringIO import StringIO @@ -267,10 +268,8 @@ def testTrainingDataFilePresence(self): self.assertTrue(os.path.isfile(trainingDataFile), "File %s is not an actual file" % trainingDataFile) - def testTrainingDataFit(self): + def _readTrainingData(self): from PyMca5.PyMcaIO import specfilewrapper as specfile - from PyMca5.PyMcaPhysics.xrf import ClassMcaTheory - from PyMca5.PyMcaPhysics.xrf import ConcentrationsTool from PyMca5.PyMcaIO import ConfigDict trainingDataFile = os.path.join(self.dataDir, "XRFSpectrum.mca") self.assertTrue(os.path.isfile(trainingDataFile), @@ -285,18 +284,51 @@ def testTrainingDataFit(self): "Training data 1st scan should contain no MCAs") y = mcaData = sf[1].mca(1) sf = None + x = numpy.arange(y.size).astype(numpy.float64) # perform the actual XRF analysis configuration = ConfigDict.ConfigDict() configuration.readfp(StringIO(cfg)) - mcaFit = ClassMcaTheory.ClassMcaTheory() - configuration=mcaFit.configure(configuration) - x = numpy.arange(y.size).astype(numpy.float64) - mcaFit.setData(x, y, - xmin=configuration["fit"]["xmin"], - xmax=configuration["fit"]["xmax"]) - mcaFit.estimate() - fitResult, result = mcaFit.startFit(digest=1) + + return x, y, configuration + + def _readStainlessSteelData(self): + from PyMca5.PyMcaIO import specfilewrapper as specfile + from PyMca5.PyMcaIO import ConfigDict + + # read the data + dataFile = os.path.join(self.dataDir, "Steel.spe") + self.assertTrue(os.path.isfile(dataFile), + "File %s is not an actual file" % dataFile) + sf = specfile.Specfile(dataFile) + self.assertTrue(len(sf) == 1, "File %s cannot be read" % dataFile) + self.assertTrue(sf[0].nbmca() == 1, + "Spe file should contain MCA data") + y = counts = sf[0].mca(1) + x = channels = numpy.arange(y.size).astype(numpy.float64) + sf = None + + # read the fit configuration + configFile = os.path.join(self.dataDir, "Steel.cfg") + self.assertTrue(os.path.isfile(configFile), + "File %s is not an actual file" % configFile) + configuration = ConfigDict.ConfigDict() + configuration.read(configFile) + # configure the fit + # make sure no secondary excitations are used + configuration["concentrations"]["usemultilayersecondary"] = 0 + + return x, y, configuration + + def testTrainingDataFit(self): + from PyMca5.PyMcaPhysics.xrf import LegacyMcaTheory + from PyMca5.PyMcaPhysics.xrf import ConcentrationsTool + + x, y, configuration = self._readTrainingData() + + # perform the actual XRF analysis + mcaFit = LegacyMcaTheory.LegacyMcaTheory() + configuration, fitResult, result, _ = self._configAndFit(x, y, configuration, mcaFit) # fit is already done, calculate the concentrations concentrationsConfiguration = configuration["concentrations"] @@ -350,39 +382,14 @@ def testTrainingDataFit(self): "Error for <%s> concentration %g != %g" % (key, internal, fp)) def testStainlessSteelDataFit(self): - from PyMca5.PyMcaIO import specfilewrapper as specfile - from PyMca5.PyMcaPhysics.xrf import ClassMcaTheory + from PyMca5.PyMcaPhysics.xrf import LegacyMcaTheory from PyMca5.PyMcaPhysics.xrf import ConcentrationsTool - from PyMca5.PyMcaIO import ConfigDict - # read the data - dataFile = os.path.join(self.dataDir, "Steel.spe") - self.assertTrue(os.path.isfile(dataFile), - "File %s is not an actual file" % dataFile) - sf = specfile.Specfile(dataFile) - self.assertTrue(len(sf) == 1, "File %s cannot be read" % dataFile) - self.assertTrue(sf[0].nbmca() == 1, - "Spe file should contain MCA data") - y = counts = sf[0].mca(1) - x = channels = numpy.arange(y.size).astype(numpy.float64) - sf = None + x, y, configuration = self._readStainlessSteelData() - # read the fit configuration - configFile = os.path.join(self.dataDir, "Steel.cfg") - self.assertTrue(os.path.isfile(configFile), - "File %s is not an actual file" % configFile) - configuration = ConfigDict.ConfigDict() - configuration.read(configFile) # configure the fit - # make sure no secondary excitations are used - configuration["concentrations"]["usemultilayersecondary"] = 0 - mcaFit = ClassMcaTheory.ClassMcaTheory() - configuration=mcaFit.configure(configuration) - mcaFit.setData(x, y, - xmin=configuration["fit"]["xmin"], - xmax=configuration["fit"]["xmax"]) - mcaFit.estimate() - fitResult, result = mcaFit.startFit(digest=1) + mcaFit = LegacyMcaTheory.LegacyMcaTheory()
+ configuration, fitResult, result, _ = self._configAndFit(x, y, configuration, mcaFit) # concentrations # fit is already done, calculate the concentrations @@ -417,7 +424,7 @@ def testStainlessSteelDataFit(self): self.assertTrue( testValue > 0.30, "Expected Cr concentration above 0.30 got %.3f" % testValue) - # chek the sum of concentration of main components is above 1 + # check the sum of concentration of main components is above 1 # because of neglecting higher order excitations elements = ["Cr K", "V K", "Mn K", "Fe Ka", "Ni K"] total = 0.0 @@ -440,7 +447,7 @@ abs(concentrationsResult["mass fraction"]["Fe Ka"] - 0.65) < 0.03, "Invalid Fe Concentration Using Tertiary Excitation") - # chek the sum of concentration of main components is above 1 + # check the sum of concentration of main components is above 1 elements = ["Cr K", "Mn K", "Fe Ka", "Ni K"] total = 0.0 for element in elements: @@ -456,12 +463,7 @@ # in order to get the good fundamental parameters configuration["concentrations"]['usematrix'] = 1 configuration["concentrations"]["usemultilayersecondary"] = 2 - mcaFit.setConfiguration(configuration) - mcaFit.setData(x, y, - xmin=configuration["fit"]["xmin"], - xmax=configuration["fit"]["xmax"]) - mcaFit.estimate() - fitResult, result = mcaFit.startFit(digest=1) + configuration, fitResult, result, _ = self._configAndFit(x, y, configuration, mcaFit) # concentrations # fit is already done, calculate the concentrations @@ -510,13 +512,8 @@ "Mn", "Fe", "Ni", "-", "-", "-","-","-"] - mcaFit = ClassMcaTheory.ClassMcaTheory() - configuration=mcaFit.configure(configuration) - mcaFit.setData(x, y, - xmin=configuration["fit"]["xmin"], - xmax=configuration["fit"]["xmax"]) - mcaFit.estimate() - fitResult, result = mcaFit.startFit(digest=1) + mcaFit = LegacyMcaTheory.LegacyMcaTheory() + configuration, fitResult, result, _ = self._configAndFit(x, y, configuration, mcaFit) # concentrations # fit is already done, calculate the concentrations @@ -531,7 +528,7 @@ fluorates = mcaFit._fluoRates, addinfo=True) - # chek the sum of concentration of main components is above 1 + # check the sum of concentration of main components is above 1 elements = ["Cr K", "Mn K", "Fe Ka", "Ni K"] total = 0.0 for element in elements: @@ -546,6 +543,236 @@ "Strategy: Element %s discrepancy too large %.1f %%" % \ (element.split()[0], delta)) + def testTrainingDataFitCompareLegacy(self): + x, y, configuration = self._readTrainingData() + self._compareLegacyMcaTheory(x, y, configuration, "Training data") + + def testStainlessSteelDataFitCompareLegacy(self): + x, y, configuration = self._readStainlessSteelData() + + with self.subTest("simple"): + configuration["concentrations"]['usematrix'] = 0 + configuration["concentrations"]["usemultilayersecondary"] = 0 + self._compareLegacyMcaTheory(x, y, configuration, "Stainless steel (simple)") + + with self.subTest("continuum"): + configuration["fit"]["continuum"] = 3 + self._compareLegacyMcaTheory(x, y, configuration, "Stainless steel (with continuum)") + configuration["fit"]["continuum"] = 0 + + with self.subTest("tails"): + configuration["fit"]["fitfunction"] = 1|2|4|8 + self._compareLegacyMcaTheory(x, y, configuration, "Stainless steel (with peak tails)") + configuration["fit"]["fitfunction"] = 1 + + with self.subTest("voigt"): + configuration["fit"]["fitfunction"] = 0 + 
self._compareLegacyMcaTheory(x, y, configuration, "Stainless steel (voigt)") + configuration["fit"]["fitfunction"] = 1 + + with self.subTest("usematrix"): + configuration["concentrations"]['usematrix'] = 1 + configuration["concentrations"]["usemultilayersecondary"] = 2 + self._compareLegacyMcaTheory(x, y, configuration, "Stainless steel (quantitative)") + + with self.subTest("strategy"): + configuration["concentrations"]['usematrix'] = 0 + configuration["concentrations"]["usemultilayersecondary"] = 2 + configuration["attenuators"]["Matrix"] = [1, 'Fe', 1.0, 1.0, 45.0, 45.0] + configuration["fit"]["strategyflag"] = 1 + configuration["fit"]["strategy"] = "SingleLayerStrategy" + configuration["SingleLayerStrategy"] = {} + configuration["SingleLayerStrategy"]["layer"] = "Auto" + configuration["SingleLayerStrategy"]["iterations"] = 3 + configuration["SingleLayerStrategy"]["completer"] = "-" + configuration["SingleLayerStrategy"]["flags"] = [1, 1, 1, 1, 0, + 0, 0, 0, 0, 0] + configuration["SingleLayerStrategy"]["peaks"] = [ "Cr K", + "Mn K", "Fe Ka", + "Ni K", "-", "-", + "-","-","-","-"] + configuration["SingleLayerStrategy"]["materials"] = ["Cr", + "Mn", "Fe", + "Ni", "-", "-", + "-","-","-"] + self._compareLegacyMcaTheory(x, y, configuration, "Stainless steel (quantitative with strategy)") + + def _compareLegacyMcaTheory(self, x, y, configuration, title): + from PyMca5.PyMcaPhysics.xrf import LegacyMcaTheory + from PyMca5.PyMcaPhysics.xrf import NewClassMcaTheory + + import time + t0 = time.time() + + mcaFitLegacy = LegacyMcaTheory.LegacyMcaTheory() + mcaFitLegacy.enableOptimizedLinearFit() + _, fitResult1, digestedResult1, imagingDigestResult1 = self._configAndFit( + x, y, copy.deepcopy(configuration), mcaFitLegacy) + + legacy_linear = mcaFitLegacy.config['fit'].get("linearfitflag", 0) and mcaFitLegacy._batchFlag and (mcaFitLegacy.linearMatrix is not None) + + t1 = time.time() + + mcaFit = NewClassMcaTheory.McaTheory() + configuration2 = copy.deepcopy(configuration) + configuration2["fit"]["linearfitflag"] = int(legacy_linear) + _, fitResult2, digestedResult2, imagingDigestResult2 = self._configAndFit( + x, y, configuration2, mcaFit) + + t2 = time.time() + + print() + print(title) + print(" LEGACY TIME", t1-t0) + print(" NEW TIME", t2-t1) + + # Compare data + numpy.testing.assert_array_equal(mcaFitLegacy.xdata.flat, mcaFit.xdata) + numpy.testing.assert_array_equal(mcaFitLegacy.ydata.flat, mcaFit.ydata) + numpy.testing.assert_array_equal(mcaFitLegacy.sigmay.flat, mcaFit.ystd) + numpy.testing.assert_array_equal(mcaFitLegacy.zz.flat, mcaFit.ynumbkg()) + + # Compare configuration + config1 = copy.deepcopy(mcaFitLegacy.config) + config2 = copy.deepcopy(mcaFit.config) + + # Remove expected differences + n = len(config2["fit"]["energy"]) + for name in ["energy", "energyweight", "energyflag", "energyscatter"]: + if config1["fit"][name] is None: + config1["fit"][name] = [] + else: + config1["fit"][name] = config1["fit"][name][:n] + if len(config1["attenuators"]["Matrix"]) == 6: + lst = config1["attenuators"]["Matrix"] + lst.extend([0, lst[-1]+lst[-2]]) + config1["fit"]["continuum_name"] = config2["fit"]["continuum_name"] + config1["fit"]["linearfitflag"] = config2["fit"]["linearfitflag"] + self._assertDeepEqual(config1, config2) + + # Compare fluo rate dictionaries + self.assertEqual(mcaFitLegacy._fluoRates, mcaFit._fluoRates) + + # Compare line groups + linegroups1 = mcaFitLegacy.PEAKS0 + linegroups2 = mcaFit._lineGroups + linegroups1 = [[[line[1], line[0], name] + for name, line in zip(names, 
lines)] + for names, lines in zip(mcaFitLegacy.PEAKS0NAMES, linegroups1)] + + self.assertEqual(len(linegroups1), len(linegroups2)) + for lg1, lg2 in zip(linegroups1, linegroups2): + self.assertEqual(len(lg1), len(lg2)) + for line1, line2 in zip(lg1, lg2): + self.assertEqual(line1[0], line2[0]) + numpy.testing.assert_allclose(line1[1], line2[1], rtol=1e-9) + self.assertEqual(line1[2], line2[2]) + + # Compare escape line groups + linegroups1 = mcaFitLegacy.PEAKS0ESCAPE + linegroups2 = mcaFit._escapeLineGroups + self.assertEqual(len(linegroups1), len(linegroups2)) + for lg1, lg2 in zip(linegroups1, linegroups2): + self.assertEqual(len(lg1), len(lg2)) + for elines1, elines2 in zip(lg1, lg2): + self.assertEqual(len(elines1), len(elines2)) + for line1, line2 in zip(elines1, elines2): + self.assertEqual(line1[0], line2[0]) + numpy.testing.assert_allclose(line1[1], line2[1], rtol=1e-9) + self.assertEqual(line1[2], line2[2]) + + # Compares parameter names + names = mcaFit.get_parameter_names() + legacy_names = mcaFitLegacy.PARAMETERS + if mcaFit.continuum > 2: + self.assertEqual(set(legacy_names), set(names)) + else: + self.assertEqual(set(legacy_names), set(names)|{"Constant", "1st Order"}) + + # Compare fit results + parameters1 = dict(zip(legacy_names, fitResult1[0])) + parameters2 = dict(zip(names, fitResult2[0])) + uncertainties1 = dict(zip(legacy_names, fitResult1[2])) + uncertainties2 = dict(zip(names, fitResult2[2])) + if False: + for name in names: + numpy.testing.assert_allclose(parameters1[name], parameters2[name], atol=1e-5, rtol=0.5, err_msg=name) + numpy.testing.assert_allclose(uncertainties1[name], uncertainties2[name], atol=1e-5, rtol=0.5, err_msg=name) + + # Compare digested results + self.assertEqual(set(digestedResult1), set(digestedResult2)) + + self.assertEqual(digestedResult1["groups"], digestedResult2["groups"]) + for groupname in digestedResult1["groups"]: + group1 = digestedResult1[groupname] + group2 = digestedResult2[groupname] + self.assertEqual(group1.keys(), group2.keys()) + self.assertEqual(group1["peaks"], group2["peaks"]) + for linename in group1["peaks"]: + line1 = group1[linename] + line2 = group2[linename] + self.assertEqual(line1.keys(), line2.keys()) + self.assertEqual(group1["escapepeaks"], group2["escapepeaks"]) + for linename in group1["escapepeaks"]: + line1 = group1[linename+"esc"] + line2 = group2[linename+"esc"] + self.assertEqual(line1.keys(), line2.keys()) + + #self._vis_compare(digestedResult1["yfit"], digestedResult2["yfit"], "yfit") + numpy.testing.assert_allclose(digestedResult1["xdata"], digestedResult2["xdata"]) + numpy.testing.assert_allclose(digestedResult1["ydata"], digestedResult2["ydata"]) + numpy.testing.assert_allclose(digestedResult1["energy"], digestedResult2["energy"], rtol=1e-3) + numpy.testing.assert_allclose(digestedResult1["continuum"], digestedResult2["continuum"], atol=1, rtol=0.05) + numpy.testing.assert_allclose(digestedResult1["yfit"], digestedResult2["yfit"], atol=1, rtol=0.05) + numpy.testing.assert_allclose(digestedResult1["pileup"], digestedResult2["pileup"], atol=1, rtol=0.05) + + # Compare image digested results + self.assertEqual(set(imagingDigestResult1), set(imagingDigestResult2)) + + self.assertEqual(imagingDigestResult1["groups"], imagingDigestResult2["groups"]) + for groupname in imagingDigestResult1["groups"]: + group1 = imagingDigestResult1[groupname] + group2 = imagingDigestResult2[groupname] + self.assertEqual(group1.keys(), group2.keys()) + self.assertEqual(group1["peaks"], group2["peaks"]) + + def 
_configAndFit(self, x, y, configuration, mcaFit): + configuration = mcaFit.configure(configuration) + mcaFit.setData(x, y, + xmin=configuration["fit"]["xmin"], + xmax=configuration["fit"]["xmax"]) + mcaFit.estimate() + fitResult, digestedResult = mcaFit.startFit(digest=1) + imagingDigestResult = mcaFit.imagingDigestResult() + return configuration, fitResult, digestedResult, imagingDigestResult + + def _vis_compare(self, a, b, title): + import matplotlib.pyplot as plt + + plt.plot(a) + plt.plot(b) + plt.title(title) + plt.show() + + def _assertDeepEqual(self, obj1, obj2, _nodes=tuple()): + """Better verbosity than assertEqual for deep structures""" + err_msg = "->".join(_nodes) + if isinstance(obj1, dict): + self.assertEqual(set(obj1.keys()), set(obj2.keys()), err_msg) + for k in obj1: + self._assertDeepEqual(obj1[k], obj2[k], _nodes=_nodes+(k,)) + elif isinstance(obj1, (list, tuple)): + self.assertEqual(len(obj1), len(obj2), err_msg) + if obj1 and isinstance(obj1[0], (list, tuple, numpy.ndarray)): + # recurse per element (recursing on the whole sequence again + # would never terminate) + for i, (item1, item2) in enumerate(zip(obj1, obj2)): + self._assertDeepEqual(item1, item2, _nodes=_nodes+(str(i),)) + else: + self.assertEqual(obj1, obj2, err_msg) + elif isinstance(obj1, numpy.ndarray): + numpy.testing.assert_allclose(obj1, obj2, rtol=0, err_msg=err_msg) + else: + self.assertEqual(obj1, obj2, err_msg) + + + def getSuite(auto=True): testSuite = unittest.TestSuite() if auto: @@ -556,11 +783,15 @@ def getSuite(auto=True): testSuite.addTest(testXrf("testTrainingDataFilePresence")) testSuite.addTest(testXrf("testTrainingDataFit")) testSuite.addTest(testXrf("testStainlessSteelDataFit")) + testSuite.addTest(testXrf("testTrainingDataFitCompareLegacy")) + testSuite.addTest(testXrf("testStainlessSteelDataFitCompareLegacy")) return testSuite + def test(auto=False): return unittest.TextTestRunner(verbosity=2).run(getSuite(auto=auto)) + if __name__ == '__main__': if len(sys.argv) > 1: auto = False diff --git a/appveyor.yml b/appveyor.yml.bak similarity index 100% rename from appveyor.yml rename to appveyor.yml.bak diff --git a/setup.py b/setup.py index 1085aeb8c..bd236c827 100644 --- a/setup.py +++ b/setup.py @@ -176,6 +176,7 @@ def use_gui(): 'PyMca5.PyMcaMisc', 'PyMca5.PyMcaMath', 'PyMca5.PyMcaMath.fitting', + 'PyMca5.PyMcaMath.fitting.model', 'PyMca5.PyMcaMath.mva', 'PyMca5.PyMcaMath.mva.py_nnma', 'PyMca5.PyMcaGraph', 'PyMca5.PyMcaGraph.backends',