"""Fitting utilities for the ramantools package.
Holds the peak shape functions (``gaussian``, ``lorentz``, ``lorentz2``),
the ``polynomial_fit`` wrapper, the ``bgsubtract`` background estimator,
and the ``peakfit`` xarray curve-fit wrapper. These are all module-level
utilities (no class state) and were grouped together in the original
single-file layout under a ``## Tools`` section.
"""
# Defer evaluation of annotations so we can use modern ``X | None`` syntax
# in function signatures without depending on Python 3.10+ at runtime.
from __future__ import annotations
import numpy as np # type: ignore
import matplotlib.pyplot as plt # type: ignore
from scipy.optimize import curve_fit # type: ignore
from scipy.signal import find_peaks # type: ignore
# _midpoint is needed in peakfit's ``toplot`` branch to pick a central
# spectrum for plotting when no explicit width/height is given.
from ._helpers import _midpoint
# Named thresholds previously spelled as magic numbers — see _constants.py
# for the rationale behind each value.
from ._constants import (
NOTCH_CUTOFF_CM,
NORMALIZE_CROP_REGION_CM,
NORMALIZE_BG_THRESHOLD,
CALIBRATE_FITRANGE_CM,
PEAKFIT_MAXFEV,
)
def _compute_calibshift(spec_1d, peakshift: float, calibfactor: float, **kwargs) -> float:
"""Compute the calibration shift for a target peak.
Shared core of ``ramanmap.calibrate`` and ``singlespec.calibrate``.
Given a 1-D Raman spectrum ``spec_1d`` (an xarray DataArray with a
``ramanshift`` coord), figures out how much to shift the
``ramanshift`` axis so the peak near ``peakshift`` lands exactly at
``peakshift``.
Algorithm (unchanged from pre-refactor):
* If ``calibfactor != 0`` the caller already knows the shift —
return it verbatim and skip the fit.
* Otherwise crop the spectrum to ±100 cm⁻¹ around ``peakshift``,
fit a peak there, and return ``peakshift - fitted_x0``.
``**kwargs`` flow through to ``peakfit``, matching the original
user-facing signatures.
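A minimal usage sketch; ``ss.ssxr`` stands in for an already-loaded, background-subtracted 1-D spectrum and the numbers are illustrative:
.. code-block:: python
shift = _compute_calibshift(ss.ssxr, peakshift=520.5, calibfactor=0)
calibrated = ss.ssxr.assign_coords(ramanshift=ss.ssxr['ramanshift'] + shift)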
"""
if calibfactor != 0:
# Caller supplied the shift directly — no fit needed.
return calibfactor
# Crop to a generous ±CALIBRATE_FITRANGE_CM window so the peak fit
# has enough baseline on either side; clip to actual data bounds
# so ``sel`` doesn't return an empty slice at the spectrum edges.
fitrange = [peakshift - CALIBRATE_FITRANGE_CM, peakshift + CALIBRATE_FITRANGE_CM]
if fitrange[0] < spec_1d.ramanshift.min().data:
fitrange[0] = spec_1d.ramanshift.min().data
if fitrange[1] > spec_1d.ramanshift.max().data:
fitrange[1] = spec_1d.ramanshift.max().data
spectofit_crop = spec_1d.sel(ramanshift=slice(fitrange[0], fitrange[1]))
fit = peakfit(spectofit_crop, stval={'x0': peakshift}, **kwargs)
# correction factor relative to the expected value: peakshift
return peakshift - fit['curvefit_coefficients'].sel(param='x0').data
def _normalize_to_peak(whole_da, peakshift: float, ref_coords=None, mode: str = 'const', **kwargs):
"""Normalize ``whole_da`` to the amplitude of a peak near ``peakshift``.
Shared core of ``ramanmap.normalize`` and ``singlespec.normalize``.
Returns ``(normalized_da, peakampl, peakpos)`` — the caller is
responsible for wrapping the result into a new instance and
appending its class-specific comment line.
Parameters
----------
whole_da
Full DataArray (map or spec) to be normalized. Its ``.attrs['comments']``
must mention ``'background subtracted'``; otherwise the
consistency check below raises.
peakshift
Rough position of the reference peak in cm⁻¹.
ref_coords
Optional dict of non-ramanshift coordinate selections (e.g.
``{'width': 5.0, 'height': 3.0}``) identifying the reference
spectrum for the ``'const'`` fit. When ``None`` the midpoint
of each non-ramanshift dim is used. For 1-D spectra the dict
is naturally empty and ``ref_cropped`` becomes the whole
cropped array.
mode
``'const'`` fits only the reference spectrum (scalar
``peakampl``); ``'individual'`` fits per-pixel over the
cropped region (``peakampl`` becomes a 2-D DataArray).
Raises
------
ValueError
If the background was not subtracted before calling
``normalize`` or the selected peak is too close to the
baseline to normalize against.
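Example
-------
A minimal sketch; ``m.mapxr`` stands in for an already background-subtracted map DataArray and the peak position is illustrative.
.. code-block:: python
normalized, ampl, pos = _normalize_to_peak(m.mapxr, peakshift=1580, mode='const')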
"""
# Crop to ±NORMALIZE_CROP_REGION_CM around the peak — wide enough
# to give the peakfit clean baseline on both shoulders.
cropped = whole_da.sel(ramanshift=slice(
peakshift - NORMALIZE_CROP_REGION_CM,
peakshift + NORMALIZE_CROP_REGION_CM,
))
# Determine reference coordinates. For maps we want the midpoint
# of (width, height); for spectra there are no extra dims so the
# dict is empty (and ``.sel(**{}) == no-op``).
if ref_coords is None:
ref_coords = {}
for dim in whole_da.dims:
if dim == 'ramanshift':
continue
ref_coords[dim] = _midpoint(whole_da.coords[dim].data)
# Reference (1-D) cropped spectrum for the bg sanity check and the
# 'const'-mode fit. ``method='nearest'`` matches the pre-refactor
# behaviour so float-coordinate selections don't raise KeyError.
if ref_coords:
ref_cropped = cropped.sel(**ref_coords, method='nearest')
else:
ref_cropped = cropped
# take the baseline offset as the intensity value at the two edges of the cropped region
bgoffset_low = ref_cropped[0].data
bgoffset_high = ref_cropped[-1].data
# Sanity check: if the spectrum baseline is high (above
# NORMALIZE_BG_THRESHOLD) or the 'background subtracted' marker is
# absent from the processing history, refuse to normalize. Same
# message as the original so upstream error-matching tests pass.
if ((bgoffset_high + bgoffset_low) / 2 > NORMALIZE_BG_THRESHOLD) or ('background subtracted' not in whole_da.attrs['comments']):
raise ValueError(
"The background was not removed, or the peak selected is not suitable "
"for normalization. This should be done in case of normalizing to a peak amplitude"
)
# Starting-value for the offset parameter: midway between the two
# endpoint values of the cropped spectrum.
offset_start = (bgoffset_high + bgoffset_low) / 2
if mode == 'const':
# Fit only the reference spectrum — yields scalar peakampl / peakpos.
fit = peakfit(
ref_cropped,
stval={'x0': peakshift, 'offset': offset_start},
**kwargs,
)
peakampl = fit['curvefit_coefficients'].sel(param='ampl').data
peakpos = fit['curvefit_coefficients'].sel(param='x0').data
elif mode == 'individual':
# Per-pixel fit across the map — peakampl becomes a DataArray
# over the non-ramanshift dims. peakpos is still captured as a
# scalar (at the reference coords) for the comment line,
# preserving the original behaviour in ``ramanmap.normalize``.
fit = peakfit(
cropped,
stval={'x0': peakshift, 'offset': offset_start},
**kwargs,
)
peakampl = fit['curvefit_coefficients'].sel(param='ampl').data
# Note: no ``method='nearest'`` here — matches the pre-refactor
# behaviour which required an exact-match selection.
peakpos = fit['curvefit_coefficients'].sel(param='x0').sel(**ref_coords).data
else:
# _require_mode is the normal guard path in the class methods;
# keeping this branch defensive so direct callers also fail loudly.
raise ValueError("`mode` parameter must be either: 'const' or 'individual'")
# Divide through by the amplitude. ``peakampl`` broadcasts naturally
# over the map dims thanks to xarray's alignment rules.
normalized = whole_da / peakampl
normalized.attrs = whole_da.attrs.copy()
normalized.attrs['units'] = ' '
normalized.attrs['long_name'] = 'normalized Raman intensity'
return normalized, peakampl, peakpos
def _reject_double_peak(func, method_name: str) -> None:
"""Raise ``ValueError`` if ``func`` is the double-peak shape.
``calibrate`` and ``normalize`` both hardcode ``.sel(param='x0')``
(and, for normalize, ``.sel(param='ampl')``) on fit results. When
``func=lorentz2`` is supplied the parameter names become
``x01``/``x02`` / ``ampl1``/``ampl2`` and those selections fail
deep inside xarray with a confusing KeyError.
The guard was inlined in four places before the refactor — all
with identical wording. Centralizing both the check and the
message here preserves byte-level error parity while removing the
duplication.
Lives in ``_fitting.py`` (alongside ``lorentz2`` itself) rather
than in ``_helpers.py`` to avoid a ``_helpers → _fitting`` circular
import. Callers in map / spec import this function directly.
"""
# Compare by identity (``is``) rather than equality: the guard should
# trigger only for the specific module-level ``lorentz2`` function object.
if func is lorentz2:
raise ValueError(
f"{method_name}() requires a single-peak shape function "
"(use func=gaussian or func=lorentz); lorentz2 is not supported."
)
def gaussian(x, x0: float = 1580, ampl: float = 10, width: float = 15, offset: float = 0):
"""Gaussian function. Width and amplitude parameters have the same meaning as for :func:`lorentz`.
:param x: values for the x coordinate
:type x: float, :py:mod:`numpy` array, etc.
:param x0: shift along the `x` coordinate
:type x0: float
:param ampl: amplitude of the peak
:type ampl: float
:param width: FWHM of the peak
:type width: float
:param offset: offset along the function value
:type offset: float
:return: values of a Gaussian function
:rtype: float, :py:mod:`numpy` array, etc.
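:Example:
A minimal evaluation sketch; the numbers are arbitrary.
.. code-block:: python
import numpy as np
x = np.linspace(1500, 1700, 500)
y = gaussian(x, x0=1580, ampl=10, width=15, offset=0)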
"""
# using the FWHM for the width
return offset + ampl * np.exp(-4*np.log(2)*(x - x0)**2 / (width**2))
def lorentz(x, x0: float = 1580, ampl: float = 1, width: float = 14, offset: float = 0):
"""Single Lorentz function
:param x: values for the x coordinate
:type x: float, :py:mod:`numpy` array, etc.
:param x0: shift along the `x` coordinate
:type x0: float
:param ampl: amplitude of the peak
:type ampl: float
:param width: FWHM of the peak
:type width: float
:param offset: offset along the function value
:type offset: float
:return: values of a single Lorentz function
:rtype: float, :py:mod:`numpy` array, etc.
.. note::
The area of the peak can be given by:
.. code-block:: python
area = np.pi * ampl * width / 2
"""
area = np.pi * ampl * width / 2
return offset + (2/np.pi) * (area * width) / (4*(x - x0)**2 + width**2)
def lorentz2(x, x01: float = 2700, ampl1: float = 1, width1: float = 15, x02: float = 2730, ampl2: float = 1, width2: float = 15, offset: float = 0):
"""Double Lorentz function
:param x: values for the x coordinate
:type x: float, :py:mod:`numpy` array, etc.
:param x01: shift of the first peak along the `x` coordinate
:type x01: float
:param ampl1: amplitude of the first peak
:type ampl1: float
:param width1: FWHM of the first peak
:type width1: float
:param x02: shift of the second peak along the `x` coordinate
:type x02: float
:param ampl2: amplitude of the second peak
:type ampl2: float
:param width2: FWHM of the second peak
:type width2: float
:param offset: offset along the function value
:type offset: float
:return: values of a double Lorentz function
:rtype: float, :py:mod:`numpy` array, etc.
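:Example:
A minimal evaluation sketch, showing the double-peak parameter names (``x01``/``ampl1``/``width1`` and ``x02``/``ampl2``/``width2``); the numbers are arbitrary.
.. code-block:: python
import numpy as np
x = np.linspace(2600, 2850, 1000)
y = lorentz2(x, x01=2700, ampl1=1, width1=15, x02=2730, ampl2=0.5, width2=20)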
"""
area1 = np.pi * ampl1 * width1 / 2
area2 = np.pi * ampl2 * width2 / 2
return offset + (2/np.pi) * (area1 * width1) / (4*(x - x01)**2 + width1**2) + (2/np.pi) * (area2 * width2) / (4*(x - x02)**2 + width2**2)
def polynomial_fit(order: int, x_data, y_data):
"""Polinomial fit to `x_data`, `y_data`
:param order: order of the polinomial to be fit
:type order: int
:param x_data: x coordinate of the data, this would be Raman shift
:type x_data: :py:mod:`numpy` array
:param y_data: y coordinate of the data, this would be Raman intensity
:type y_data: :py:mod:`numpy` array
:return: coefficients of the polynomial ``coeff``, as used by :py:mod:`numpy.polyval`, and the covariance matrix ``covar``, as returned by :py:mod:`scipy.optimize.curve_fit`
:rtype: tuple: (:py:mod:`numpy` array, :py:mod:`numpy` array)
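:Example:
A minimal sketch on synthetic data: fit a first-order background to a noisy line. All numbers are arbitrary.
.. code-block:: python
import numpy as np
rng = np.random.default_rng(0)
x = np.linspace(100, 3000, 500)
y = 0.002 * x + 40 + rng.normal(0, 1, x.shape)
coeff, covar = polynomial_fit(1, x, y)
background = np.polyval(coeff, x)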
"""
# Define polynomial function of given order
def poly_func(x, *coeffs):
y = np.polyval(coeffs, x)
return y
# Initial guess for the coefficients is all ones
guess = np.ones(order + 1)
# Use curve_fit to find best fit parameters
coeff, covar = curve_fit(poly_func, x_data, y_data, p0 = guess)
return coeff, covar
def bgsubtract(x_data, y_data, polyorder: int = 1, toplot: bool = False, fitmask = None, hmin: float = 50, hmax: float = 10000, wmin: float = 4, wmax: float = 60, prom: float = 10, exclusion_factor: float = 6, peak_pos = None):
"""Takes Raman shift and Raman intensity data and automatically finds peaks in the spectrum, using :py:mod:`scipy.find_peaks`.
These peaks are then used to define the areas of the background signal.
In the areas with the peaks removed, the background is fitted by a polynomial of order given by the optional argument: ``polyorder``.
The fit is performed by :py:mod:`scipy.optimize.curve_fit`.
The function returns the Raman intensity counts with the background removed, the background polynomial values themselves, and the coefficients of the background fit, as used by :py:mod:`numpy.polyval`.
In cases where the automatic peak finding does not work as expected, one can pass the ``x_data`` values at which peaks appear, using the ``peak_pos`` option.
In this case, the ``wmin`` option determines the width of all peaks.
If a ``fitmask`` is supplied, the peak search is skipped and only the polynomial fit is performed.
This can decrease the runtime.
:param x_data: Raman shift values
:type x_data: :py:mod:`numpy` array
:param y_data: Raman intensity values
:type y_data: :py:mod:`numpy` array
:param polyorder: order of polynomial used to fit the background, defaults to 1
:type polyorder: int, optional
:param toplot: if `True`, a plot of the fit, the background used, and the positions of the peaks is shown; defaults to False
:type toplot: bool, optional
:param fitmask: fitmask to be used for the polynomial fit; if given, the peak search is skipped. Defaults to None
:type fitmask: :py:mod:`numpy` array, optional
:param hmin: minimum height of the peaks passed to :py:mod:`scipy.signal.find_peaks`, defaults to 50
:type hmin: float, optional
:param hmax: maximum height of the peaks passed to :py:mod:`scipy.signal.find_peaks`, defaults to 10000
:type hmax: float, optional
:param wmin: minimum width of the peaks, passed to :py:mod:`scipy.signal.find_peaks`, defaults to 4
:type wmin: float, optional
:param wmax: maximum width of the peaks passed to :py:mod:`scipy.signal.find_peaks`, defaults to 60
:type wmax: float, optional
:param prom: prominence of the peaks, passed to :py:mod:`scipy.signal.find_peaks`, defaults to 10
:type prom: float, optional
:param exclusion_factor: this parameter multiplies the width of the peaks found by :py:mod:`scipy.signal.find_peaks`, or specified by ``wmin`` if the peak positions are passed by hand, using ``peak_pos``, defaults to 6
:type exclusion_factor: float, optional
:param peak_pos: list of the peak positions in ``x_data`` values used for exclusion, defaults to None
:type peak_pos: list of floats, optional
:return: ``y_data_nobg``, ``bg_values``, ``coeff``, ``params_used_at_run``, ``mask``, ``covar``
:rtype: tuple: (:py:mod:`numpy` array, :py:mod:`numpy` array, :py:mod:`numpy` array, dictionary, :py:mod:`numpy` array, :py:mod:`numpy` array)
* ``y_data_nobg``: Raman counts, with the background subtracted,
* ``bg_values``: the polynomial values of the fit, at the Raman shift positions,
* ``coeff``: coefficients of the polynomial fit, as used by: :py:mod:`numpy.polyval`,
* ``params_used_at_run``: parameters used at runtime
* ``mask``: the calculated fitmask
* ``covar``: covariance of the fit parameters
.. note::
Using the option: ``peak_pos``, a ``wmin*exclusion_factor`` region (measured in datapoints) on both sides of each peak is excluded from the background fit.
If automatic peak finding is used, the exclusion area is calculated in a similar way, but the width of the individual peaks are used, as determined by :py:mod:`scipy.signal.find_peaks`.
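:Example:
A minimal sketch on synthetic data (a linear background plus a single Lorentzian peak); all numbers are arbitrary.
.. code-block:: python
import numpy as np
x = np.linspace(100, 3000, 2000)
y = 0.01 * x + 50 + lorentz(x, x0=1580, ampl=200, width=15)
y_nobg, bg_values, coeff, params, mask, covar = bgsubtract(x, y, polyorder=1)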
"""
# if a mask is passed to the function, don't calculate the peak positions
if fitmask is None:
if peak_pos is None:
# Find the peaks with specified minimum height and width
# rel_height sets the relative height (as a fraction of the peak maximum) at which the peak width is evaluated
peak_properties = find_peaks(y_data, height = (hmin, hmax), width = (wmin, wmax), prominence = prom, rel_height = 0.5)
# Find the indices of the peaks
peak_indices = peak_properties[0]
# Get the properties of the peaks
peak_info = peak_properties[1]
else:
# Use the provided peak positions
peak_indices = []
for peak_position in peak_pos:
# Find the index of the closest data point to the peak position
closest_index = np.argmin(np.abs(x_data - peak_position))
peak_indices.append(closest_index)
# Calculate the widths of the peaks using the data
peak_widths = [wmin] * len(peak_indices) # Use the minimum width if peak widths cannot be calculated from the data
peak_info = {'widths': peak_widths}
# Calculate the start and end indices of each peak with a larger exclusion area
start_indices = peak_indices - (exclusion_factor * np.array(peak_info['widths'])).astype(int)
end_indices = peak_indices + (exclusion_factor * np.array(peak_info['widths'])).astype(int)
# Ensure indices are within data bounds
start_indices = np.maximum(start_indices, 0)
end_indices = np.minimum(end_indices, len(x_data) - 1)
# Define the indices covered by the peaks
covered_indices = []
for start, end in zip(start_indices, end_indices):
covered_indices.extend(range(start, end + 1))
# Remove these indices from the data
mask = np.ones(x_data.shape[0], dtype = bool)
mask[covered_indices] = False
else:
# if a mask was passed, use that
mask = fitmask
peak_indices = None
# make the mask False for the region below the notch filter
# cutoff (~80 cm^-1 physically; NOTCH_CUTOFF_CM leaves a margin).
x_data_notch = x_data[x_data < NOTCH_CUTOFF_CM]
mask[:x_data_notch.shape[0]] = False
uncovered_x_data = x_data[mask]
uncovered_y_data = y_data[mask]
# Fit polynomial to the remaining data
coeff, covar = polynomial_fit(polyorder, uncovered_x_data, uncovered_y_data)
# Calculate the fitted polynomial values
bg_values = np.polyval(coeff, x_data)
# Line subtracted data
y_data_nobg = y_data - bg_values
if toplot == True:
# Plot the data and peaks
plt.plot(x_data, y_data, label = 'Raman spectrum')
# Highlight the peaks
if fitmask is None:
plt.scatter(x_data[peak_indices], y_data[peak_indices], color = 'green', label = 'peaks')
else:
pass
# Plot the fitted polynomial
plt.plot(x_data, bg_values, color = 'k', ls = "dashed", label = 'fitted polynomial')
# Highlight the background used for fitting
plt.scatter(uncovered_x_data, uncovered_y_data, color = 'red', marker= 'o', alpha = 1, label = 'background used for fit') # type: ignore
plt.xlabel('Raman shift (cm$^{-1}$)')
plt.ylabel('Raman intensity (a.u.)')
plt.title('Data plot with peaks, fitted line and background highlighted.')
plt.legend()
params_used_at_run = {'polyorder': polyorder, 'hmin': hmin, 'hmax': hmax, 'wmin': wmin, 'wmax': wmax, 'prom':prom, 'exclusion_factor': exclusion_factor, 'peak_pos': peak_pos}
return y_data_nobg, bg_values, coeff, params_used_at_run, mask, covar
def peakfit(xrobj, func = lorentz, fitresult = None, stval = None, bounds = None, toplot: bool = False, width: float | None = None, height: float | None = None, **kwargs):
"""Fitting a function to peaks in the data contained in ``xrobj``, which can be a single spectrum, a map or a selected spectrum from a map.
:param xrobj: :py:mod:`xarray` DataArray, of a single spectrum or a map.
:type xrobj: :py:mod:`xarray` DataArray
:param func: function to be used for fitting, defaults to lorentz
:type func: function, optional
:param fitresult: an :py:mod:`xarray` Dataset of a previous fit calculation, with matching dimensions. If this is passed to :func:`peakfit`, the fit calculation is skipped and the passed Dataset is used.
:type fitresult: :py:mod:`xarray` Dataset, optional
:param stval: starting values for the fit parameters of ``func``. You are free to specify only some of the values; the rest are filled in from the default keyword-argument values of ``func``.
:type stval: dictionary of ``func`` parameters, optional
:param bounds: bounds for the fit parameters, used by :py:mod:`xarray.curvefit`. A dictionary similar to ``stval``, but each value is a two-element list with the lower and upper bound. Defaults to None
:type bounds: dictionary of ``func`` parameters, with tuples containing lower and upper bounds, optional
:param toplot: plot the fit result, defaults to ``False``
:type toplot: boolean, optional
:param width: width parameter of an :py:mod:`xarray` map to be used in conjunction with ``toplot = True``
:type width: `int` or `float`, optional
:param height: height parameter of an :py:mod:`xarray` map to be used in conjunction with ``toplot = True``
:type height: `int` or `float`, optional
:return: fitted parameters of ``func`` and covariances in a Dataset
:rtype: :py:mod:`xarray` Dataset
:Example:
.. code-block:: python
import ramantools as rt
map_path = r'data path on your machine'
info_path = r'metadata path on your machine'
# use raw strings, starting with `r'` to escape special characters, such as backslash
# Load a map
map = rt.ramanmap(map_path, info_path)
# Creating the dictionaries for the starting values and the bounds
# The default function is `lorentz`, with parameters:
p = {'x0': 2724, 'ampl': 313, 'width': 49, 'offset': 0}
b = {'x0': [2500, 2900], 'ampl': [0, 900], 'width': [20, 100], 'offset': [-10, 50]}
# passing the starting values in `p` and the bounds in `b` to the `peakfit()` method
# `m_nobg` is assumed to be the map after background subtraction
mapfit = rt.peakfit(m_nobg.mapxr, stval = p, bounds = b, toplot = True)
.. note::
- Use ``toplot`` = `True` to tweak the starting values. With ``toplot`` = `True` on a map, if no ``width`` and ``height`` are specified, the middle of the map is used for plotting.
- Passing a ``bounds`` dictionary to :func:`peakfit` seems to increase the fitting time significantly. This might be an issue with :py:mod:`xarray.DataArray.curvefit`.
- By passing a previous fit result, using the optional parameter ``fitresult``, we can just plot the fit result at multiple regions of the map.
- In case of using double Lorentz fitting, the names of the parameters change! See: :func:`lorentz2`.
.. seealso::
It is good practice to crop the data to the vicinity of the peak you want to fit.
"""
# get the parameters used by the function: func
# and also get the default values for the keyword arguments
param_count = func.__code__.co_argcount
param_names = func.__code__.co_varnames[:param_count]
defaults = func.__defaults__
# get the starting index for the keyword arguments
kwargs_start_index = param_count - len(defaults)
# make a dictionary with the keyword arguments (parameters) and their default values specified in the function: func
kwargs_with_defaults = dict(zip(param_names[kwargs_start_index:], defaults))
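# e.g. for the default `lorentz` this yields {'x0': 1580, 'ampl': 1, 'width': 14, 'offset': 0}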
# loop over the keys in stval and fill missing values with defaults
if stval is None:
stval = kwargs_with_defaults
else:
# if only some values are missing, fill in the rest
for key in kwargs_with_defaults.keys():
if key not in stval:
stval[key] = kwargs_with_defaults[key]
# fit
# the `xrobj` should have a 'ramanshift' coordinate
# `errors = 'ignore'` lets the computation continue if the fit fails for some spectra (their coefficients become NaN)
# `maxfev` is passed on to scipy.optimize.curve_fit (leastsq); it caps the number of function evaluations before quitting
if fitresult is None:
fit = xrobj.curvefit('ramanshift', func, p0 = stval, bounds = bounds, errors = 'ignore', kwargs = {'maxfev': PEAKFIT_MAXFEV})
else:
fit = fitresult
# plot the resulting fit
if toplot is True:
#check if the xrobj is a single spectrum or map
if 'width' in xrobj.dims:
# it's a map
if (width is not None) and (height is not None):
# get coordinates to plot, or take the middle spectrum
plotwidth = width
plotheight = height
else:
# Use the helper to compute central coordinates for plotting
plotwidth = _midpoint(xrobj.width.data)
plotheight = _midpoint(xrobj.height.data)
paramnames = fit['curvefit_coefficients'].sel(width = plotwidth, height = plotheight, method = 'nearest').param.values
funcparams = fit['curvefit_coefficients'].sel(width = plotwidth, height = plotheight, method = 'nearest').data
# plot the data
xrobj.sel(width = plotwidth, height = plotheight, method = 'nearest').plot(color = 'black', marker = '.', lw = 0, label = 'data')
else:
paramnames = fit['curvefit_coefficients'].param.values
funcparams = fit['curvefit_coefficients'].data
# plot the data
xrobj.plot(color = 'black', marker = '.', lw = 0, label = 'data')
# print the starting and fitted values of the parameters
print('Values of starting parameters: \n', stval, '\n')
print('Values of fitted parameters:\n')
for name, fitvalues in zip(paramnames, funcparams):
print(name, ':', f'{fitvalues:.2f}')
shiftmin = min(xrobj.ramanshift.data)
shiftmax = max(xrobj.ramanshift.data)
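# dense evaluation grid for the fitted curve: roughly 100 points per cm^-1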
shift = np.linspace(shiftmin, shiftmax, num = int((shiftmax - shiftmin)*100))
funcvalues = func(shift, *funcparams)
# set plotting variables based on the datapoints
# this should be the ramanshift of the peak maximum if the fit was successful
fitpeakpos = shift[np.argmax(funcvalues)]
plotarea_x = 100
plotarea_y = [0.8*np.min(funcvalues), 1.1*np.max(funcvalues)]
plt.plot(shift, funcvalues, color = 'red', lw = 3, alpha = 0.5, label = 'fit')
plt.xlim([fitpeakpos - plotarea_x, fitpeakpos + plotarea_x])
plt.ylim(plotarea_y)
plt.legend()
# copy attributes to the fit dataset, update the 'comments'
fit.attrs = xrobj.attrs.copy()
# update the comments if it exists
if hasattr(fit, 'comments'):
fit.attrs['comments'] += 'peak fitting, using ' + str(func.__name__) + '\n'
return fit