added code to detect the traps

This processing will be needed to compute the traps mask.
This code still needs to be cleaned up.
Guillaume Raffy 2020-01-29 12:11:43 +01:00
parent 6e165b8fc6
commit 224234c3e7
4 changed files with 196 additions and 14 deletions


@@ -15,7 +15,14 @@ class Aabb(object):
self.y_min = y_min
self.x_max = x_max
self.y_max = y_max
@property
def width(self):
return self.x_max - self.x_min + 1
@property
def height(self):
return self.y_max - self.y_min + 1
class PixelType:
U8 = 1
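The width and height properties added to Aabb above treat the bounds as inclusive, hence the + 1. A minimal usage sketch, using the trap box coordinates from the test file below (the package path is a guess):

# hedged sketch: the import path is an assumption, adjust to the actual package layout
from lipase.imageengine import Aabb

aabb = Aabb(423, 419, 553, 533)  # x_min, y_min, x_max, y_max (inclusive)
assert aabb.width == 131         # 553 - 423 + 1
assert aabb.height == 115        # 533 - 419 + 1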
@@ -30,6 +37,16 @@ class PixelType:
def get_num_bits(pixel_type):
return PixelType.PIXEL_TYPE_TO_NUM_BITS[ pixel_type ]
class Match(object):
def __init__(self, x, y, correlation):
self.x = x
self.y = y
self.correlation = correlation
def __str__(self):
return "(%d, %d) - %f" % (self.x, self.y, self.correlation)
class IImage(ABC):
@abc.abstractmethod
@@ -40,6 +57,20 @@ class IImage(ABC):
def height(self):
pass
@abc.abstractmethod
def get_subimage(self, aabb):
"""
:param Aabb aabb: axis-aligned bounding box of the subimage
"""
pass
@abc.abstractmethod
def get_pixel_type(self):
"""
:rtype PixelType:
"""
pass
class IHyperStack(ABC):
@@ -198,6 +229,13 @@ class IImageEngine(ABC):
@abc.abstractmethod
def create_hyperstack(self, width, height, num_channels, num_slices, num_frames, pixel_type):
"""
:rtype IHyperStack:
"""
@abc.abstractmethod
def create_image(self, width, height, pixel_type):
"""
:rtype IImage:
"""
@abc.abstractmethod
@@ -260,4 +298,17 @@ class IImageEngine(ABC):
raise NotImplementedError()
return image
@abc.abstractmethod
def match_template(self, src_image, template_image):
"""
:param IImage src_image:
:param IImage template_image:
:return IImage: similarity image
"""
@abc.abstractmethod
def find_maxima(self, src_image):
"""
:param IImage src_image:
:return list(Match): maxima
"""


@@ -1,9 +1,11 @@
"""Image processing using imagej.
"""
from ..imageengine import IImage, IHyperStack, IImageEngine, PixelType
from ..imageengine import IImage, IHyperStack, IImageEngine, PixelType, Match
from ij import IJ, ImagePlus # pylint: disable=import-error
from ij.measure import ResultsTable
from ij.plugin import ImageCalculator # pylint: disable=import-error
from ij.plugin.filter import MaximumFinder
from ijopencv.ij import ImagePlusMatConverter # pylint: disable=import-error
from ijopencv.opencv import MatImagePlusConverter # pylint: disable=import-error
import org.bytedeco.javacpp.opencv_core as opencv_core # pylint: disable=import-error
@@ -11,15 +13,31 @@ import org.bytedeco.javacpp.opencv_imgproc as opencv_imgproc # pylint: disable=import-error
# from org.bytedeco.javacpp.opencv_imgcodecs import imread, imwrite # pylint: disable=import-error
IJ_PIXELTYPE_TO_PIXEL_TYPE = {
ImagePlus.GRAY8: PixelType.U8,
ImagePlus.GRAY16: PixelType.U16,
ImagePlus.GRAY32: PixelType.F32
}
class IJImage(IImage):
def __init__(self, image_engine, ij_image):
def __init__(self, image_engine, ij_image=None, width=None, height=None, pixel_type=None):
"""
:param IJImageEngine image_engine:
:param ImagePlus ij_image:
"""
self.image_engine = image_engine
self.ij_image = ij_image
if ij_image is not None:
assert ij_image.getNSlices() == 1
assert ij_image.getNFrames() == 1
assert ij_image.getNChannels() == 1
self.ij_image = ij_image
else:
assert isinstance(width, int)
assert isinstance(height, int)
assert isinstance(pixel_type, int)
stack_name = ''
self.ij_image = IJ.createHyperStack(stack_name, width, height, 1, 1, 1, PixelType.get_num_bits(pixel_type))
def width(self):
return self.ij_image.width
@@ -27,6 +45,20 @@ class IJImage(IImage):
def height(self):
return self.ij_image.height
def get_pixel_type(self):
ij_pixel_type = self.ij_image.getType()
return IJ_PIXELTYPE_TO_PIXEL_TYPE[ij_pixel_type]
def get_subimage(self, aabb):
self.ij_image.saveRoi()
self.ij_image.setRoi(aabb.x_min, aabb.y_min, aabb.width, aabb.height)
ij_subimage = self.ij_image.crop()
self.ij_image.restoreRoi()
# ij_subimage = self.image_engine.create_hyperstack(width=aabb.width, height=aabb.height, num_channels=1, num_slices=1, num_frames=1, pixel_type=self.get_pixel_type())
# ij_subimage.paste()
return IJImage(self.image_engine, ij_subimage)
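A note on the design of get_subimage: the ROI is only used as a cropping tool, and saveRoi()/restoreRoi() put back whatever selection the caller already had on the ImagePlus. A hedged sketch of that behaviour built with the stock ImageJ API (the import paths and the way the engine is obtained are assumptions):

# hedged sketch: import paths and engine acquisition are assumptions
from ij import IJ
from ij.gui import Roi
from lipase.imageengine import IImageEngine, Aabb
from lipase.imagej import IJImage

imp = IJ.createImage('demo', '8-bit black', 640, 480, 1)
imp.setRoi(Roi(5, 5, 20, 20))                # the caller's own selection
engine = IImageEngine.get_instance()         # assumes the ImageJ engine is registered as the global instance
image = IJImage(engine, ij_image=imp)
sub = image.get_subimage(Aabb(100, 100, 150, 150))
assert sub.width() == 51                     # 150 - 100 + 1, inclusive bounds
assert imp.getRoi().getBounds().width == 20  # the caller's selection was restored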
class IJHyperStack(IHyperStack):
@@ -67,7 +99,7 @@ class IJHyperStack(IHyperStack):
def get_pixel_type(self):
ij_pixel_type = self.hyperstack.getType()
return { ImagePlus.GRAY8: PixelType.U8, ImagePlus.GRAY16: PixelType.U16 }[ij_pixel_type]
return IJ_PIXELTYPE_TO_PIXEL_TYPE[ij_pixel_type]
@@ -169,6 +201,9 @@ class IJImageEngine(IImageEngine):
def create_hyperstack(self, width, height, num_channels, num_slices, num_frames, pixel_type):
return IJHyperStack(self, width, height, num_channels, num_slices, num_frames, pixel_type)
def create_image(self, width, height, pixel_type):
return IJImage(self, width=width, height=height, pixel_type=pixel_type)
def load_image(self, src_image_file_path):
image_plus = IJ.openImage(src_image_file_path)
return IJImage(self, image_plus)
@@ -224,5 +259,80 @@ class IJImageEngine(IImageEngine):
raise NotImplementedError()
return image
def match_template(self, src_image, template_image):
# import org.opencv.imgproc.Imgproc;
#
# /*
# * CV_TM_SQDIFF = 0, CV_TM_SQDIFF_NORMED = 1, CV_TM_CCORR = 2,
# * CV_TM_CCORR_NORMED = 3, CV_TM_CCOEFF = 4, CV_TM_CCOEFF_NORMED = 5;
# */
correlation_method_id = opencv_imgproc.CV_TM_SQDIFF
# Imgproc.matchTemplate(src, tpl, correlationImage, toCvCorrelationId(correlationMethodId));
imp2mat = ImagePlusMatConverter()
cv_src_image = imp2mat.toMat(src_image.ij_image.getProcessor())
cv_template_image = imp2mat.toMat(template_image.ij_image.getProcessor())
similarity_image_size = opencv_core.Size(src_image.width()-template_image.width()+1, src_image.height()-template_image.height()+1)
cv_similarity_image = opencv_core.Mat(similarity_image_size, cv_src_image.type())
# warning! guessing the arguments from opencv's documentation is time consuming because the error messages are misleading: in the call below, javacpp complains that the 1st argument is not an org.bytedeco.javacpp.opencv_core$Mat, while it is; the real problem is the order of the arguments.
# so, instead of guessing, use the accepted signatures of cvMatchTemplate listed in https://github.com/bytedeco/javacpp-presets/blob/master/opencv/src/gen/java/org/bytedeco/opencv/global/opencv_imgproc.java
# print(cv_src_image)
# print(cv_template_image)
# print(cv_similarity_image)
if False:
cv_src_image = opencv_core.Mat(cv_src_image.size(), opencv_core.CV_8U)
cv_template_image = opencv_core.Mat(cv_template_image.size(), opencv_core.CV_8U)
cv_similarity_image = opencv_core.Mat(cv_similarity_image.size(), opencv_core.CV_8U)
print(cv_src_image)
print(cv_template_image)
print(cv_similarity_image)
opencv_imgproc.matchTemplate( cv_src_image, cv_template_image, cv_similarity_image, correlation_method_id)
if correlation_method_id in [ opencv_imgproc.CV_TM_SQDIFF, opencv_imgproc.CV_TM_SQDIFF_NORMED ]:
scale = -1.0
# opencv_core.multiply(cv_similarity_image, opencv_core.Scalar.all(scale), cv_similarity_image)
# cv_similarity_image = opencv_core.Mat(similarity_image_size, cv_src_image.type())
# cv_similarity_image = opencv_core.Mat(cv_similarity_image.size(), opencv_core.CV_8U)
cv_similarity_image = opencv_core.multiply(cv_similarity_image, scale).asMat()
# print(cv_similarity_image)
similarity_image = self.create_image(
width=src_image.width()-template_image.width()+1,
height=src_image.height()-template_image.height()+1,
pixel_type=PixelType.F32)
mat2imp = MatImagePlusConverter()
similarity_image.ij_image.setProcessor(mat2imp.toImageProcessor(cv_similarity_image))
return similarity_image
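CV_TM_SQDIFF scores every possible placement of the template, so the similarity image has one pixel per top-left position, and the -1.0 scaling turns the best (lowest squared difference) placements into maxima that find_maxima below can detect. A short hedged sketch of the expected shape and of picking the best placement (src_image and template_image stand for any two IImage instances, ie for the registered engine):

# hedged sketch: src_image and template_image are assumed to be IImage instances
ie = IImageEngine.get_instance()
similarity = ie.match_template(src_image, template_image)
# one score per possible top-left placement of the template
assert similarity.width() == src_image.width() - template_image.width() + 1
assert similarity.height() == src_image.height() - template_image.height() + 1
# after the -1.0 scaling, the best placement is the global maximum
best = max(ie.find_maxima(similarity), key=lambda m: m.correlation)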
def find_maxima(self, src_image):
# to understand the meaning of threshold and tolerance, I had to look at the source code in https://github.com/imagej/imagej1/blob/master/ij/plugin/filter/MaximumFinder.java#L636
# 1. The algorithm first builds the list of maxima candidates. Only local maxima above the threshold are considered valid candidates.
# 2. Each candidate is then processed to find its neighborhood area, starting from the highest. For each candidate, a propagation is performed on the neighbours as long as the neighbour value is within the tolerance of the candidate. If another candidate is encountered as a neighbour, that candidate is removed, as it has been absorbed by the current maximum candidate.
threshold = -1500.0 # only consider local maxima above this value
tolerance = 1500 # ignore a local maximum if it lies in the neighborhood of another maximum. The neighborhood of a maximum is the area that propagates until the difference between the pixel value and the value of the considered local maximum exceeds the tolerance. The higher the tolerance, the more local maxima are removed.
# typical peak values are around -500 and the values between peaks are below -2500
matches = []
output_type = MaximumFinder.LIST
exclude_on_edges = False
is_euclidean_distance_map = False
max_finder = MaximumFinder()
max_finder.findMaxima(src_image.ij_image.getProcessor(), tolerance, threshold, output_type, exclude_on_edges, is_euclidean_distance_map)
results_table = ResultsTable.getResultsTable()
num_points = results_table.getCounter()
# print("number of matches : %d" % num_points)
for match_index in range(num_points):
x = int(results_table.getValue('X', match_index))
y = int(results_table.getValue('Y', match_index))
correlation = src_image.ij_image.getProcessor().getf(x, y)
match = Match(x, y, correlation)
# print(match)
matches.append(match)
return matches
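The hard-coded threshold and tolerance are tuned for the negated CV_TM_SQDIFF images produced by match_template above: the comments report typical peak values around -500 and inter-peak values below -2500, so -1500 sits between the two. A hedged post-filtering sketch using those reported values (the -1000.0 cutoff is purely illustrative):

# hedged sketch: 'similarity' is a similarity image produced by match_template above
ie = IImageEngine.get_instance()
matches = ie.find_maxima(similarity)
# keep only maxima close to the typical peak value of about -500 reported above
strong_matches = [m for m in matches if m.correlation > -1000.0]  # illustrative cutoff
print('%d of %d maxima kept' % (len(strong_matches), len(matches)))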


@@ -11,6 +11,13 @@ def remove_traps(sequence, channel_id, trap_aabb):
:param Sequence sequence:
:param str channel_id:
:param Aabb trap_aabb:
the idea of this method is as follows:
1. let the user provide an axis-aligned bounding box that contains a trap, and use this area as a pattern to be searched for in all images of the sequence. Provided the tolerance is big enough, this should find all traps in the sequence.
2. compute a clean trap (one without globules) by taking, for each pixel, the median value of that pixel over all detected traps.
3. for each trap location, remove the trap by subtracting the clean trap. This should leave only the globules.
At the end of this processing, we expect the image to contain only the globules.
"""
white_estimator = WhiteEstimator(open_size=75, close_size=75, average_size=75)
white_estimate = white_estimator.estimate_white([sequence], [channel_id])
@@ -18,13 +25,27 @@ def remove_traps(sequence, channel_id, trap_aabb):
uniform_stack = IImageEngine.get_instance().create_hyperstack(width=white_estimate.width(), height=white_estimate.height(), num_slices=1, num_frames=sequence.num_frames, num_channels=1, pixel_type=PixelType.F32)
ie = IImageEngine.get_instance()
for frame_index in range(sequence.num_frames):
non_uniform_frame = IImageEngine.get_instance().load_image(sequence.get_image_file_path(channel_index=0, frame_index=frame_index, slice_index=0))
uniform_frame = IImageEngine.get_instance().divide(non_uniform_frame, white_estimate)
uniform_stack.set_image(frame_index=frame_index, image=uniform_frame)
#num_expected_traps = 13 # 13 traps are completely visible in the
#assert num_traps = num_expected_traps
first_image = uniform_stack.get_image(frame_index=0)
trap_image = first_image.get_subimage(trap_aabb)
ie.save_as_tiff(trap_image, '/home/graffy/Desktop/template.tiff')
similarity_image = ie.match_template(first_image, trap_image)
ie.save_as_tiff(similarity_image, '/home/graffy/Desktop/similarity.tiff')
matches = ie.find_maxima(similarity_image)
num_traps = len(matches)
print("number of traps found : %d" % num_traps)
num_expected_traps = 13 # 13 traps are completely visible in the
assert abs(num_traps - num_expected_traps) <= 1
#non_uniform_stack = sequence.as_stack()
#uniform_stack = IImageEngine.get_instance().divide(non_uniform_stack, white_estimate)
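Step 2 of the docstring above (building a clean trap as a per-pixel median over all detected traps) is not implemented in this commit. A hedged sketch of the part the current API already supports, namely turning each match into the axis-aligned box of the corresponding trap; in OpenCV's matchTemplate convention, a match's (x, y) is the top-left corner of the template placement:

# hedged sketch: reuses matches, trap_aabb and first_image defined in remove_traps above
trap_images = []
for match in matches:
    trap_box = Aabb(match.x, match.y,
                    match.x + trap_aabb.width - 1,
                    match.y + trap_aabb.height - 1)  # same size as the template, inclusive bounds
    trap_images.append(first_image.get_subimage(trap_box))
# the per-pixel median of trap_images (the clean trap of step 2) is left for a later commit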


@@ -43,14 +43,14 @@ class TestLipase(unittest.TestCase):
non_uniform_sequence = self.catalog.sequences['res_soleil2018/GGH/GGH_2018_cin2_phiG_I_327_vis_-40_1/Pos0']
uniform_sequence = correct_non_uniform_lighting(non_uniform_sequence, 'DM300_nofilter_vis', white_estimator=WhiteEstimator(open_size=75, close_size=75, average_size=75))
# def test_traps_removal(self):
# sequence = self.catalog.sequences['res_soleil2018/GGH/GGH_2018_cin2_phiG_I_327_vis_-40_1/Pos0']
# x_min = 423
# x_max = 553
# y_min = 419
# y_max = 533
# trap_aabb = Aabb(x_min, y_min, x_max, y_max)
# remove_traps(sequence, 'DM300_nofilter_vis', trap_aabb)
def test_traps_removal(self):
sequence = self.catalog.sequences['res_soleil2018/GGH/GGH_2018_cin2_phiG_I_327_vis_-40_1/Pos0']
x_min = 423
x_max = 553
y_min = 419
y_max = 533
trap_aabb = Aabb(x_min, y_min, x_max, y_max)
remove_traps(sequence, 'DM300_nofilter_vis', trap_aabb)
def run_script():