added an image processing step that computes the per-pixel radial profile (needed for graffy/lipase#3).

Guillaume Raffy 2020-04-06 16:35:28 +02:00
parent e8b1e331fe
commit 4daf2bfe91
6 changed files with 262 additions and 209 deletions

View File

@@ -0,0 +1,123 @@
"""
an image processing technique to produce a 1D signal for each pixel : this 1D signal is obtained by projecting the neighborhood of this pixel on a set of bases (each base is used as a convilution kernel)
https://subversion.ipr.univ-rennes1.fr/repos/main/projects/antipode/src/python/antipode/texori.py
"""
from imageengine import IImageEngine, PixelType


class IProjectorBase(object):
    """
    A set of projectors.

    A projector is a grey-level image in which each pixel value acts as a weight.
    """

    def __init__(self):
        pass

    def get_num_projectors(self):
        pass

    def get_projector_kernel(self, projector_index):
        pass


def create_circle_image(image_size, circle_radius, circle_pos, circle_thickness, background_value=0.0, circle_value=1.0):
    """
    :param dict image_size: size of the output image, as a {'width': int, 'height': int} dict
    :param float circle_radius: radius of the circle, in pixels
    :param dict circle_pos: center of the circle, as a {'x': float, 'y': float} dict
    :param float circle_thickness: thickness of the ring, in pixels
    """
    ie = IImageEngine.get_instance()
    width = image_size['width']
    height = image_size['height']
    assert isinstance(width, int)
    assert isinstance(height, int)
    circle_pos_x = float(circle_pos['x'])
    circle_pos_y = float(circle_pos['y'])
    r_min = max(0.0, circle_radius - circle_thickness * 0.5)
    r_max = circle_radius + circle_thickness * 0.5
    r_min_square = r_min * r_min
    r_max_square = r_max * r_max
    image = ie.create_image(width=width, height=height, pixel_type=PixelType.F32)
    for y in range(height):
        for x in range(width):
            dx = x - circle_pos_x
            dy = y - circle_pos_y
            r_square = (dx * dx + dy * dy)
            if r_min_square < r_square < r_max_square:
                pixel_value = circle_value
            else:
                pixel_value = background_value
            image.set_pixel(x, y, pixel_value)
    return image
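
# Illustration (values chosen for the example, not taken from the commit): with
# image_size={'width': 9, 'height': 9}, circle_pos={'x': 4, 'y': 4},
# circle_radius=3 and circle_thickness=1, only the pixels whose distance to
# (4, 4) falls strictly between 2.5 and 3.5 receive circle_value; every other
# pixel keeps background_value, which yields a one-pixel-thick ring.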


class CircularSymmetryProjectorBase(IProjectorBase):
    """
    Generates a base of circles.
    """

    def __init__(self, max_radius, oversampling_scale=2):
        """
        :param int max_radius: the radius of the biggest circle in the set of projectors
        :param int oversampling_scale: oversampling is used to generate antialiased circles; the higher this value, the better the antialiasing quality
        """
        super(CircularSymmetryProjectorBase, self).__init__()
        assert max_radius > 0
        assert oversampling_scale > 0
        self.max_radius = max_radius
        self.oversampling_scale = oversampling_scale
        self.num_projectors = int(max_radius) + 1

    def get_num_projectors(self):
        return self.num_projectors

    def get_projector_kernel(self, projector_index):
        assert projector_index < self.get_num_projectors()
        oversampling_is_handled = False  # TODO: handle oversampling to increase quality
        if not oversampling_is_handled and self.oversampling_scale != 1:
            raise NotImplementedError("at the moment, oversampling is not yet implemented")
        radius = projector_index
        image_size = int(self.max_radius) * 2 + 1
        circle_pos = {'x': self.max_radius, 'y': self.max_radius}
        oversampled_circle = create_circle_image(
            image_size={'width': image_size * self.oversampling_scale, 'height': image_size * self.oversampling_scale},
            circle_radius=radius * self.oversampling_scale,
            circle_pos={'x': self.max_radius * self.oversampling_scale, 'y': self.max_radius * self.oversampling_scale},
            circle_thickness=1 * self.oversampling_scale)
        if self.oversampling_scale == 1:
            circle_image = oversampled_circle
        else:
            if oversampling_is_handled:
                circle_image = oversampled_circle.resample(width=image_size, height=image_size)
        # mean_value = circle_image.get_mean_value()
        # num_pixels = image_size * image_size
        # sum_of_pixel_values = mean_value * num_pixels
        # we want each circle to have a total weight of 1.0, regardless of its radius
        # circle_image.scale_values(1.0 / sum_of_pixel_values)
        anchor_point = {'x': int(circle_pos['x']), 'y': int(circle_pos['y'])}
        return circle_image, anchor_point
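
# Example (hypothetical parameters): CircularSymmetryProjectorBase(max_radius=10,
# oversampling_scale=1) exposes 11 projectors with radii 0..10; each call to
# get_projector_kernel(i) returns a 21x21 image containing a ring of radius i,
# together with the ring center {'x': 10, 'y': 10}, to be used as the anchor of
# the convolution kernel.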


class CircularSymmetryDetector:

    def __init__(self, max_radius):
        """
        :param float max_radius:
        """
        self.max_radius = max_radius

    def compute_radial_profiles(self, src_image):
        """ Computes, for each pixel, the radial profile centered on that pixel.

        :param IImage src_image:
        :rtype IHyperStack: each radial profile is stored along the channel axis of the hyperstack
        """
        ie = IImageEngine.get_instance()
        ie.debugger.on_image(src_image, 'src_image')
        projector_base = CircularSymmetryProjectorBase(self.max_radius, oversampling_scale=1)
        radial_profile_image = ie.create_hyperstack(width=src_image.get_width(), height=src_image.get_height(), num_channels=projector_base.get_num_projectors(), num_slices=1, num_frames=1, pixel_type=PixelType.F32)
        for projector_index in range(projector_base.get_num_projectors()):
            projector, center_of_filter = projector_base.get_projector_kernel(projector_index)
            ie.debugger.on_image(projector, 'projector_%d' % projector_index)
            projection = ie.filter2D(src_image, dst_type=PixelType.F32, kernel=projector, anchor=(center_of_filter['x'], center_of_filter['y']))
            ie.debugger.on_image(projection, 'projection_%d' % projector_index)
            radial_profile_image.set_image(projection, frame_index=0, slice_index=0, channel_index=projector_index)
        return radial_profile_image
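
For readers without an ImageJ/Fiji setup, the idea behind compute_radial_profiles can be sketched with NumPy/SciPy alone. This is a minimal illustration, not the commit's code: ring_kernel and radial_profiles are hypothetical names, and unlike the projectors above (which all share the same (2*max_radius+1)-sized square support), each kernel here is sized to its own radius.

import numpy as np
from scipy import ndimage

def ring_kernel(radius, thickness=1.0):
    # binary ring of the given radius, centered in a (2*radius+1)^2 window
    size = int(radius) * 2 + 1
    y, x = np.mgrid[0:size, 0:size]
    r = np.hypot(x - size // 2, y - size // 2)
    return ((radius - thickness * 0.5 < r) & (r < radius + thickness * 0.5)).astype(np.float32)

def radial_profiles(image, max_radius):
    # channel r of the result holds, for each pixel, the sum of the image
    # intensities lying at a distance of about r from that pixel
    return np.stack([ndimage.convolve(image.astype(np.float32), ring_kernel(r), mode='reflect')
                     for r in range(int(max_radius) + 1)])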

View File

@@ -97,6 +97,14 @@ class IImage(ABC):
        """
        pass
    @abc.abstractmethod
    def scale_values(self, scale):
        """
        multiply the value of each pixel by the given scalar

        :param float scale:
        """

class IHyperStack(ABC):

    @abc.abstractmethod
@@ -426,6 +434,17 @@ class IImageEngine(ABC):
        :param int radius: in pixels
        """
    @abc.abstractmethod
    def filter2D(self, src_image, dst_type, kernel, anchor=(-1, -1)):
        """ as cv2.filter2D

        :param IImage src_image:
        :param PixelType or None dst_type: pixel type of the destination image (if None, the pixel type is the same as the source image's)
        :param IImage kernel: convolution kernel (or rather a correlation kernel), a single-channel floating-point matrix; if you want to apply different kernels to different channels, split the image into separate color planes using split() and process them individually
        :param tuple(int, int) anchor: anchor of the kernel, indicating the relative position of a filtered point within the kernel; the anchor should lie within the kernel; the default value (-1, -1) means that the anchor is at the kernel center
        :rtype IImage:
        """
    @abc.abstractmethod
    def perform_gray_morphology(self, image, operator, structuring_element_shape, structuring_element_radius):
        """
@@ -515,3 +534,4 @@ class IImageEngine(ABC):
        :param IImage src_binary_image:
        :rtype IImage: binary image
        """

View File

@@ -24,6 +24,12 @@ IJ_PIXELTYPE_TO_PIXEL_TYPE = {
    ImagePlus.GRAY32: PixelType.F32
}

PIXEL_TYPE_TO_CV_PIXEL_TYPE = {
    PixelType.U8: opencv_core.CV_8U,
    PixelType.U16: opencv_core.CV_16U,
    PixelType.F32: opencv_core.CV_32F,
}

class IJImage(IImage):

    def __init__(self, image_engine, ij_image=None, width=None, height=None, pixel_type=None):
@@ -101,6 +107,10 @@ class IJImage(IImage):
        image_stats = processor.getStats()  # :type ImageStatistics image_stats:
        return image_stats.mean

    def scale_values(self, scale):
        raise NotImplementedError()

class IJHyperStack(IHyperStack):

    def __init__(self, image_engine, width, height, num_channels, num_slices, num_frames, pixel_type):
@@ -231,6 +241,7 @@ def perform_gray_morphology_with_ijopencv(image, operator, structuring_element_shape, structuring_element_radius):
    mat2imp = MatImagePlusConverter()
    image.ij_image.setProcessor(mat2imp.toImageProcessor(cv_dst_image))

class ImageLogger(IImageProcessingDebugger):

    def __init__(self):
@@ -327,6 +338,39 @@ class IJImageEngine(IImageEngine):
        """Implement interface method."""
        IJ.run(image.ij_image, "Mean...", "radius=%d" % radius)
    def filter2D(self, src_image, dst_type, kernel, anchor=(-1, -1)):
        if anchor == (-1, -1):
            assert kernel.get_width() % 2 == 1 and kernel.get_height() % 2 == 1, "kernel sizes are expected to be odd if you want the anchor to be at the center of the kernel"
            anchor = ((kernel.get_width() - 1) // 2, (kernel.get_height() - 1) // 2)  # integer division; the anchor is the kernel center
        imp2mat = ImagePlusMatConverter()
        cv_src_image = imp2mat.toMat(src_image.ij_image.getProcessor())
        cv_kernel = imp2mat.toMat(kernel.ij_image.getProcessor())
        cv_anchor = opencv_core.Point(anchor[0], anchor[1])
        ie = IImageEngine.get_instance()
        dst_image = ie.create_image(width=src_image.get_width(), height=src_image.get_height(), pixel_type=PixelType.F32)
        cv_dst_image = opencv_core.Mat(cv_src_image.size(), cv_src_image.type())
        # Warning! Trying to guess the arguments from opencv's documentation is time consuming, as the error messages are misleading. In the call below, javacpp complains that the 1st argument is not a org.bytedeco.javacpp.opencv_core$Mat, while it is; the problem actually comes from the order of the arguments:
        #     opencv_imgproc.morphologyEx(cv_src_image, opencv_imgproc.MORPH_OPEN, struct_element, dst_image)
        #     TypeError: morphologyEx(): 1st arg can't be coerced to org.bytedeco.javacpp.opencv_core$GpuMat, org.bytedeco.javacpp.opencv_core$Mat, org.bytedeco.javacpp.opencv_core$UMat
        # So, instead of guessing, the accepted signatures of filter2D are listed in https://github.com/bytedeco/javacpp-presets/blob/master/opencv/src/gen/java/org/bytedeco/opencv/global/opencv_imgproc.java
        delta = 0
        border_type = opencv_core.BORDER_DEFAULT
        if dst_type is None:
            ddepth = -1  # keep the same depth as the source image
        else:
            ddepth = PIXEL_TYPE_TO_CV_PIXEL_TYPE[dst_type]
        opencv_imgproc.filter2D(cv_src_image, cv_dst_image, ddepth, cv_kernel, cv_anchor, delta, border_type)
        mat2imp = MatImagePlusConverter()
        dst_image.ij_image.setProcessor(mat2imp.toImageProcessor(cv_dst_image))
        return dst_image
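
    # Usage sketch (hypothetical caller, not part of this commit): build a 3x3
    # mean kernel through the engine's own image type, then convolve, letting
    # the anchor default to the kernel center:
    #     ie = IImageEngine.get_instance()
    #     kernel = ie.create_image(width=3, height=3, pixel_type=PixelType.F32)
    #     for ky in range(3):
    #         for kx in range(3):
    #             kernel.set_pixel(kx, ky, 1.0 / 9.0)
    #     smoothed = ie.filter2D(src_image, dst_type=PixelType.F32, kernel=kernel)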

    def perform_gray_morphology(self, image, operator, structuring_element_shape, structuring_element_radius):
        """
        :param IJImage image:

View File

@@ -1,143 +0,0 @@
"""
an image processing technique to produce a 1D signal for each pixel : this 1D signal is obtained by projecting the neighborhood of this pixel on a set of bases (each base is used as a convilution kernel)
https://subversion.ipr.univ-rennes1.fr/repos/main/projects/antipode/src/python/antipode/texori.py
"""
from imageengine import IImageEngine, PixelType


class IProjectorBase(object):
    """
    A set of projectors.

    A projector is a grey-level image in which each pixel value acts as a weight.
    """

    def __init__(self):
        pass

    def get_num_projectors(self):
        pass

    def get_projector_kernel(self, projector_index):
        pass


def create_circle_image(image_size, circle_radius, circle_pos, circle_thickness, background_value=0.0, circle_value=1.0):
    """
    :param dict image_size:
    """
    ie = IImageEngine.get_instance()
    width = image_size['width']
    height = image_size['height']
    circle_pos_x = float(circle_pos['x'])
    circle_pos_y = float(circle_pos['y'])
    r_min = circle_radius - circle_thickness * 0.5
    assert r_min >= 0.0
    r_max = circle_radius + circle_thickness * 0.5
    r_min_square = r_min * r_min
    r_max_square = r_max * r_max
    image = ie.create_image(width=width, height=height, pixel_type=PixelType.F32)
    for y in range(height):
        for x in range(width):
            dx = x - circle_pos_x
            dy = y - circle_pos_y
            r_square = (dx * dx + dy * dy)
            if r_min_square < r_square < r_max_square:
                pixel_value = circle_value
            else:
                pixel_value = background_value
            image.set_pixel(x, y, pixel_value)
    return image


class CircularSymetryProjectorBase(IProjectorBase):
    """
    Generates a base of circles.
    """

    def __init__(self, max_radius, oversampling_scale=2):
        """
        :param int max_radius: the radius of the biggest circle in the set of projectors
        :param int oversampling_scale: oversampling is used to generate antialiased circles; the higher this value, the better the antialiasing quality
        """
        super().__init__()
        assert max_radius > 0
        assert oversampling_scale > 0
        self.max_radius = max_radius
        self.oversampling_scale = oversampling_scale
        self.num_projectors = max_radius + 1

    def get_num_projectors(self):
        return self.num_projectors

    def get_projector_kernel(self, projector_index):
        assert projector_index < self.get_num_projectors()
        radius = projector_index
        image_size = self.max_radius * 2 + 1
        circle_pos = {'x': self.max_radius, 'y': self.max_radius}
        oversampled_circle = create_circle_image(
            image_size={'width': image_size * self.oversampling_scale, 'height': image_size * self.oversampling_scale},
            circle_radius=radius * self.oversampling_scale,
            circle_pos={'x': self.max_radius * self.oversampling_scale, 'y': self.max_radius * self.oversampling_scale},
            circle_thickness=1 * self.oversampling_scale)
        circle_image = oversampled_circle.resample(width=image_size, height=image_size)
        anchor_point = circle_pos
        return circle_image, anchor_point

# class LocalProjector:
#     """ This image processor computes the probability for each pixel to have a material oriented in the given direction.
#
#     The technique is based on the notion of a projector. The role of the projector is to project (or cumulate) the 2D neighborhood of each pixel on a projection axis that passes through the pixel and that has an orientation chosen by the user.
#     """
#
#     def __init__(self, searched_orientation, image_process_listener=imageprocessing.NullImageProcessListener()):
#         imageprocessing.IImageProcessor.__init__(self, image_process_listener)
#         self.searched_orientation = searched_orientation
#         self.orientation_amount_image = None
#
#     @staticmethod
#     def compute_noise_in_perpendicular_orientation(src_image, orientation, half_window_width=5, image_process_listener=imageprocessing.NullImageProcessListener()):
#         """ Computes an image that gives, for each pixel, the amount of noise in the direction perpendicular to the given orientation.
#
#         Because the projection has to be performed for each pixel, we encode the projection operator in the form of a set of num_projectors 2D kernels that are used as filters. Each kernel cumulates the image signal (therefore computes the sum of the image signal) along a line perpendicular to the projection axis, each at a different position (offset) on this projection axis.
#
#         :param src_image: the input image, the one we want to process
#         :type src_image: numpy.array
#         :param orientation: the orientation along which the noise needs to be computed
#         :type orientation: float (in radians)
#         :param half_window_width: half size of the neighborhood window, in pixels (the neighborhood window is a window of size (2*half_window_width+1, 2*half_window_width+1) centered on each pixel). The bigger, the more precise the measurement, but with less locality.
#         """
#         projector_base = OrientationProbabilityEstimator.ProjectorBase2(orientation, searched_segment_size=10.0, cut_blur_stddev=3.0, image_process_listener=image_process_listener)
#         projections = numpy.empty((src_image.shape[0], src_image.shape[1], projector_base.get_num_projectors()), dtype=numpy.float32)
#         # print('projections.shape=%s' % str(projections.shape))
#         for projector_index in range(projector_base.get_num_projectors()):
#             projector, center_of_filter = projector_base.get_projector_kernel(projector_index)
#             projection = cv2.filter2D(src_image.astype(numpy.float32), ddepth=-1, kernel=projector, anchor=tuple(center_of_filter))
#             image_process_listener.onImage(projection, 'projection_%f_%d' % (orientation, projector_index))
#             projections[:, :, projector_index] = projection
#         # for each pixel, compute the gradient along its projection axis
#         # swap axes because I don't think filter2D works along anything but the first 2 axes
#         projections = numpy.swapaxes(projections, 1, 2)
#         grad_kernel = numpy.array((-0.5, 0.5), dtype=numpy.float32)
#         gradients = cv2.filter2D(projections, ddepth=-1, kernel=grad_kernel, anchor=(0, 0))
#         if image_process_listener is not None:
#             # grab a slice of the gradient image for debugging purposes
#             cut = gradients[:, :, gradients.shape[2] / 2]
#             image_process_listener.onImage(cut, 'cut_%f' % (orientation))
#         gradients = numpy.swapaxes(gradients, 1, 2)
#         gradients *= gradients
#         texture_amount = numpy.sum(gradients, axis=2)
#         image_process_listener.onImage(texture_amount, 'texture_amount_%f' % (orientation))
#         return texture_amount
#
#     def processImage(self, image):
#         self.orientation_amount_image = OrientationProbabilityEstimator.compute_noise_in_perpendicular_orientation(image, self.searched_orientation, self.get_image_process_listener())
#
#     def get_result(self):
#         return self.orientation_amount_image

View File

@@ -8,7 +8,7 @@ import unittest  # unittest2 doesn't exist in fiji
import sys
from lipase.imageengine import IImageEngine, PixelType, Aabb, NullDebugger, FileBasedDebugger
from lipase.imagej.ijimageengine import IJImageEngine, IJImage
from lipase.circsymdetector import create_circle_image


class ImProcTester(unittest.TestCase):

View File

@@ -16,6 +16,7 @@ from lipase.traps_detector import TrapsDetector
from lipase.catalog import ImageCatalog, Sequence
from lipase.lipase import Lipase, ImageLogger
from lipase.lipase import GlobulesAreaEstimator, EmptyFrameBackgroundEstimator
from lipase.circsymdetector import CircularSymmetryDetector
from lipase.imagej.hdf5serializer import save_hdf5_file
@@ -35,78 +36,86 @@ class TestLipase(unittest.TestCase):
        print("uninitializing TestLipase instance")
        self.catalog = None

    # def test_estimate_white(self):
    #     sequence = self.catalog.sequences['res_soleil2018/GGH/GGH_2018_cin2_phiG_I_327_vis_-40_1/Pos2']
    #     white_estimator = WhiteEstimator(open_size=75, close_size=75, average_size=75)
    #     white_estimate = white_estimator.estimate_white([sequence], ['DM300_327-353_fluo'])
    #     # find_white_reference_image(white_estimate, sequence.get_white())
    #     print(white_estimate)
    #     IImageEngine.get_instance().debugger.on_image(white_estimate, 'white_estimate')
    #     # assert False, "hellooooo"
    #     print('end of test_estimate_white')

    # def test_uniform_lighting_correction(self):
    #     non_uniform_sequence = self.catalog.sequences['res_soleil2018/GGH/GGH_2018_cin2_phiG_I_327_vis_-40_1/Pos0']
    #     uniform_sequence = correct_non_uniform_lighting(non_uniform_sequence, 'DM300_nofilter_vis', white_estimator=WhiteEstimator(open_size=75, close_size=75, average_size=75))  # pylint: disable=unused-variable

    # def test_template_matcher(self):
    #     sequence = self.catalog.sequences['res_soleil2018/GGH/GGH_2018_cin2_phiG_I_327_vis_-40_1/Pos0']
    #     stack = sequence.as_hyperstack(['DM300_nofilter_vis'], selected_frames=[0])
    #     first_image = stack.get_image(frame_index=0)
    #     x_min = 423
    #     x_max = 553
    #     y_min = 419
    #     y_max = 533
    #     template_trap_aabb = Aabb(x_min, y_min, x_max, y_max)
    #     template_trap_image = first_image.get_subimage(template_trap_aabb)
    #     for image in [first_image, template_trap_image]:
    #         print(image.get_pixel_type(), image.get_width(), image.get_height())
    #     # the typical value of peaks is -2.e10 and the value between peaks is below -8.0e10
    #     threshold = -3.0e10
    #     tolerance = 1.0e10
    #     maxima_finder = MaximaFinder(threshold, tolerance)
    #     template_matcher = TemplateMatcher(maxima_finder)
    #     matches = template_matcher.match_template(first_image, template_trap_image)
    #     num_traps = len(matches)
    #     print("number of traps found : %d" % num_traps)
    #     num_expected_traps = 13  # 13 traps are completely visible in the first image
    #     self.assertAlmostEqual(len(matches), num_expected_traps, delta=1.0)

    # def test_traps_detector(self):
    #     # the typical value of peaks is -500 and the value between peaks is below -2500
    #     threshold = -1500.0
    #     tolerance = 1500
    #     maxima_finder = MaximaFinder(threshold, tolerance)
    #     template_matcher = TemplateMatcher(maxima_finder)
    #     traps_detector = TrapsDetector(template_matcher)
    #     sequence = self.catalog.sequences['res_soleil2018/GGH/GGH_2018_cin2_phiG_I_327_vis_-40_1/Pos0']
    #     x_min = 423
    #     x_max = 553
    #     y_min = 419
    #     y_max = 533
    #     trap_aabb = Aabb(x_min, y_min, x_max, y_max)
    #     traps_mask = traps_detector.compute_traps_mask(sequence, 'DM300_nofilter_vis', trap_aabb)
    #     measured_mean_value = traps_mask.get_mean_value()
    #     expected_traps_coverage = 0.07909
    #     traps_pixel_value = 255.0
    #     expected_mean_value = expected_traps_coverage * traps_pixel_value
    #     print("expected_mean_value: %f" % expected_mean_value)
    #     print("measured_mean_value: %f" % measured_mean_value)
    #     self.assertAlmostEqual(measured_mean_value, expected_mean_value, delta=0.01)

    # def test_visible_traps_sequence_processing(self):
    #     traps_sequence = self.catalog.sequences['res_soleil2018/GGH/GGH_2018_cin2_phiG_I_327_vis_-40_1/Pos0']
    #     visible_traps_sequence = traps_sequence.as_hyperstack(['DM300_nofilter_vis'])
    #     background_estimator = EmptyFrameBackgroundEstimator(empty_frame_index=39)
    #     processor = GlobulesAreaEstimator(background_estimator=background_estimator, particle_threshold=2000.0)
    #     results = processor.detect_particles(visible_traps_sequence)
    #     save_hdf5_file('results.h5', results)
    #     # the results file can be checked with "h5dump --xml ./lipase.git/results.h5"
    #     first_frame_measured_ratio = results['globules_area_ratio'][(0,)]
    #     first_frame_expected_ratio = 0.008
    #     self.assertAlmostEqual(first_frame_measured_ratio, first_frame_expected_ratio, delta=0.01)
    def test_circle_detector(self):
        traps_sequence = self.catalog.sequences['res_soleil2018/GGH/GGH_2018_cin2_phiG_I_327_vis_-40_1/Pos0']
        visible_traps_sequence = traps_sequence.as_hyperstack(['DM300_nofilter_vis'])
        src_image = visible_traps_sequence.get_image(frame_index=0)
        ie = IImageEngine.get_instance()
        detector = CircularSymmetryDetector(max_radius=10.0)
        radial_profiles = detector.compute_radial_profiles(src_image)

    # def test_lipase_process(self):
    #     lipase = Lipase(self.catalog, debugger=NullDebugger())