Coverage for local_installation/dynasor/post_processing/average_runs.py: 95% (33 statements)
import numpy as np
from dynasor.sample import Sample
from typing import List
from copy import deepcopy


def get_sample_averaged_over_independent_runs(
        samples: List[Sample], live_dangerously=False) -> Sample:
    """
    Compute an averaged sample from multiple samples obtained from identical, independent runs.

    Note that the meta data and dimensions of all samples must be identical,
    otherwise an ``AssertionError`` is raised (unless ``live_dangerously`` is set to ``True``).

    Parameters
    ----------
    samples
        list of all sample objects to average over
    live_dangerously
        if set to ``True``, samples whose meta data is not identical may still be averaged
    """

    # get meta data and dimensions from the first sample
    sample_ref = samples[0]
    data_dict = dict()
    meta_data = deepcopy(sample_ref.meta_data)

    # test that all samples have identical dimensions
    for sample in samples:
        assert sorted(sample.dimensions) == sorted(sample_ref.dimensions)
        for dim in sample_ref.dimensions:
            assert np.allclose(sample[dim], sample_ref[dim])

    for dim in sample_ref.dimensions:
        data_dict[dim] = sample_ref[dim]

    # test that all samples have identical meta_data
    if not live_dangerously:
        for sample in samples:
            assert len(sample.meta_data) == len(meta_data)

            for key, val in meta_data.items():
                if isinstance(val, dict):
                    for k in val:
                        assert sample_ref.meta_data[key][k] == sample.meta_data[key][k]
                elif isinstance(val, np.ndarray):
                    assert np.allclose(sample.meta_data[key], val)
                elif isinstance(val, float):
                    assert np.isclose(sample.meta_data[key], val)
                else:
                    assert sample.meta_data[key] == val

    # average all correlation functions over the runs;
    # np.nanmean ignores NaN entries from individual runs
    for key in sample_ref.available_correlation_functions:
        data = [sample[key] for sample in samples]
        data_dict[key] = np.nanmean(data, axis=0)

    return sample_ref.__class__(data_dict, **meta_data)
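
The core of the averaging step above is ``np.nanmean(data, axis=0)``: the correlation functions from the individual runs are stacked along a new leading axis and averaged element-wise, with NaN entries contributed by individual runs ignored. A minimal, self-contained sketch of that step (the toy arrays below are made up for illustration):

import numpy as np

# three "runs" of the same correlation function; one run has a missing (NaN) entry
run1 = np.array([1.0, 2.0, 3.0])
run2 = np.array([1.2, np.nan, 2.8])
run3 = np.array([0.8, 2.2, 3.2])

# stack along a new leading axis and average element-wise, ignoring NaNs
average = np.nanmean([run1, run2, run3], axis=0)
print(average)  # -> [1.  2.1 3. ]

Calling the function itself then amounts to something like ``averaged = get_sample_averaged_over_independent_runs([sample1, sample2, sample3])``, where ``sample1`` etc. (names illustrative) are ``Sample`` objects produced by identical dynasor analyses of independent trajectories; the result is a new ``Sample`` with the same dimensions and meta data but run-averaged correlation functions.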