Make test_annotate more stable
Could we have a more stable test_annotate?
If you deal with megapixel images, the sum of the errors is obviously high. It would be much more stable to compare pixel by pixel, for example:
is_ok = numpy.allclose(compare, copy, atol=1)
What do you think?
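For illustration, here is a minimal sketch of that per-pixel check as a standalone helper. The helper name, paths, and default tolerance are assumptions for the sketch, not part of the existing test suite:

    import numpy as np
    from PIL import Image

    def images_close(path_a, path_b, atol=1):
        # Hypothetical helper: compare two images pixel by pixel, allowing
        # each channel value to differ by at most `atol` levels.
        # A signed dtype is used so the subtraction cannot wrap around.
        with Image.open(path_a) as a, Image.open(path_b) as b:
            arr_a = np.asarray(a, dtype=np.int16)
            arr_b = np.asarray(b, dtype=np.int16)
        if arr_a.shape != arr_b.shape:
            return False
        return bool(np.all(np.abs(arr_a - arr_b) <= atol))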
_____________________________ test_annotate[data1] _____________________________

data = {'beam': {'center': [2140082.632109303, -4231399.999999999], 'position': [2140199.9999999995, -4231399.999999999]}, 'd...': {'center': [2045848.0499999993, -4166239.9399999995], 'pixelsize': [-1497.6499999999976, -1551.4300000000046]}, ...}

    @pytest.mark.parametrize("data", (DATA, DATA2))
    def test_annotate(data):
        orig_path = f"{DIR_PATH}/assets/downstream_{data['downstream']}.png"
        compare_path = f"{DIR_PATH}/assets/downstream_{data['downstream']}annotated.png"
        copy_path = f"/tmp/downstream{data['downstream']}_test.png"
        with Image.open(orig_path) as orig:
            copy = orig.copy()
        copy.save(copy_path)
        ann = AnnotateImage(copy_path)
        ann.annotate(
            data["image"]["center"],
            data["beam"]["position"],
            data["image"]["pixelsize"],
            1e-9,
            data["subsample"],
        )
        with Image.open(compare_path) as compare:
            with Image.open(copy_path) as copy:
                img_diff = ImageChops.difference(compare, copy)
        diff = np.array(img_diff)
>       assert diff.sum() == approx(0)
E       assert 17520 == 0 ± 1.0e-12
E         comparison failed
E         Obtained: 17520
E         Expected: 0 ± 1.0e-12
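Applied to the test above, the failing assertion could then become something like the following sketch. It reuses the img_diff already computed in the test (ImageChops.difference yields absolute per-channel differences, so a plain threshold works); the tolerance of 1 is an assumption:

    diff = np.array(img_diff)
    # Per-pixel check: no channel may differ by more than 1 level,
    # instead of requiring the sum over all pixels to be exactly zero.
    assert np.all(diff <= 1), f"max per-pixel difference is {diff.max()}"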