refactored traps detector into 3 different classes and cleaned up

This commit is contained in:
Guillaume Raffy 2020-01-29 18:19:01 +01:00
parent a64eed47da
commit dcf5d61f3d
8 changed files with 152 additions and 88 deletions

View File

@ -37,16 +37,6 @@ class PixelType:
def get_num_bits(pixel_type):
return PixelType.PIXEL_TYPE_TO_NUM_BITS[ pixel_type ]
class Match(object):
def __init__(self, x, y, correlation):
self.x = x
self.y = y
self.correlation = correlation
def __str__(self):
return "(%d, %d) - %f" % (self.x, self.y, self.correlation)
class IImage(ABC):
@abc.abstractmethod
@ -193,11 +183,6 @@ class StackImageFeeder(IImageFeeder):
return image
def add_image(self, image_filepath):
self.image_filepaths.append(image_filepath)
class IImageEngine(ABC):
"""
@ -295,8 +280,6 @@ class IImageEngine(ABC):
white_estimate = outer;
clear outer;
"""
raise NotImplementedError()
return image
@abc.abstractmethod
def match_template(self, src_image, template_image):
@ -307,8 +290,14 @@ class IImageEngine(ABC):
"""
@abc.abstractmethod
def find_maxima(self, src_image):
def find_maxima(self, src_image, threshold, tolerance):
"""
To understand the meaning of threshold and tolerance, I had to look at the source code in https://github.com/imagej/imagej1/blob/master/ij/plugin/filter/MaximumFinder.java#L636
1. The algorithm first builds the list of maxima candidates. Only local maxima above the threshold are considered as valid maxima candidates.
2. Then each maximum candidate is processed to find its neighborhood area, starting from the highest. For each candidate, a propagation is performed on its neighbours as long as the neighbour value is within the tolerance of the maximum candidate. If another candidate is encountered as a neighbour, then that candidate is removed, as it has been absorbed by the current maximum candidate.
:param IImage src_image:
:param float threshold: local maxima below this value are ignored
:param float tolerance: ignore local maxima if they are in the neighborhood of another maximum. The neighborhood of a maximum is defined by the area that propagates until the difference between the pixel value and the value of the considered local maximum exceeds tolerance. The higher the tolerance, the more local maxima are removed.
:return list(Match): maxima
"""

View File

@ -1,11 +1,12 @@
"""Image processing using imagej.
"""
from ..imageengine import IImage, IHyperStack, IImageEngine, PixelType, Match
from ..imageengine import IImage, IHyperStack, IImageEngine, PixelType
from ..maxima_finder import Match
from ij import IJ, ImagePlus # pylint: disable=import-error
from ij.measure import ResultsTable
from ij.measure import ResultsTable # pylint: disable=import-error
from ij.plugin import ImageCalculator # pylint: disable=import-error
from ij.plugin.filter import MaximumFinder
from ij.plugin.filter import MaximumFinder # pylint: disable=import-error
from ijopencv.ij import ImagePlusMatConverter # pylint: disable=import-error
from ijopencv.opencv import MatImagePlusConverter # pylint: disable=import-error
import org.bytedeco.javacpp.opencv_core as opencv_core # pylint: disable=import-error
@ -128,9 +129,7 @@ def perform_gray_morphology_with_imagej(image, operator, structuring_element_sha
assert operator not in ['fast open', 'fast erode'], "as of 13/09/2019, fast operators such as 'fast erode' seem broken in fiji (the resulting image is not at all similar to their slow equivalent)"
processor = image.ij_image.getProcessor()
convert_to_byte = False
if processor.getBitDepth() != 8:
convert_to_byte = True
min_value = processor.getMin()
max_value = processor.getMax()
print("warning: downgrading image to byte as imagej's Gray Morphology processing doesn't handle 16bit images (range=[%d, %d])" % (min_value, max_value))
@ -257,7 +256,7 @@ class IJImageEngine(IImageEngine):
def replace_border(self, image, band_width):
"""Implement interface method."""
raise NotImplementedError()
return image
return image # pylint: disable=unreachable
def match_template(self, src_image, template_image):
@ -310,13 +309,10 @@ class IJImageEngine(IImageEngine):
return similarity_image
def find_maxima(self, src_image):
def find_maxima(self, src_image, threshold, tolerance):
# To understand the meaning of threshold and tolerance, I had to look at the source code in https://github.com/imagej/imagej1/blob/master/ij/plugin/filter/MaximumFinder.java#L636
# 1. The algorithm first builds the list of maxima candidates. Only local maxima above the threshold are considered as valid maxima candidates.
# 2. Then each maximum candidate is processed to find its neighborhood area, starting from the highest. For each candidate, a propagation is performed on its neighbours as long as the neighbour value is within the tolerance of the maximum candidate. If another candidate is encountered as a neighbour, then that candidate is removed, as it has been absorbed by the current maximum candidate.
threshold = -1500.0 # only consider local maxima above this value
tolerance = 1500 # ignore local maxima if they are in the neighborhood of another maximum. The neighborhood of a maximum is defined by the area that propagates until the difference between the pixel value and the value of the considered local maximum exceeds tolerance. The higher the tolerance, the more local maxima are removed.
# the typical value of peaks is -500 and the value between peaks is below -2500
matches = []
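For context, a hedged sketch of how the imported MaximumFinder could populate `matches` here; it assumes ImageJ1's MaximumFinder.getMaxima(ip, tolerance, excludeOnEdges) and ImageProcessor.getPixelValue(x, y) APIs, and is not necessarily what the rest of this method (cut off in this hunk) does:

# hedged sketch, assuming MaximumFinder.getMaxima(ip, tolerance, excludeOnEdges)
# returns a java.awt.Polygon of maxima coordinates
ip = src_image.ij_image.getProcessor()
polygon = MaximumFinder().getMaxima(ip, tolerance, False)
for i in range(polygon.npoints):
    x, y = polygon.xpoints[i], polygon.ypoints[i]
    value = ip.getPixelValue(x, y)
    if value > threshold:  # getMaxima takes no threshold argument, so filter here
        matches.append(Match(x, y, value))
return matches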

View File

@ -0,0 +1,43 @@
import abc
from imageengine import IImageEngine
ABC = abc.ABCMeta('ABC', (object,), {})
class Match(object):
def __init__(self, x, y, correlation):
self.x = x
self.y = y
self.correlation = correlation
def __str__(self):
return "(%d, %d) - %f" % (self.x, self.y, self.correlation)
class IMaximaFinder(ABC):
@abc.abstractmethod
def find_maxima(self, image):
"""
:param IImage image:
:rtype: list(Match)
"""
class MaximaFinder(IMaximaFinder):
def __init__(self, threshold, tolerance):
"""
To understand the meaning of threshold and tolerance, I had to look at the source code in https://github.com/imagej/imagej1/blob/master/ij/plugin/filter/MaximumFinder.java#L636
1. The algorithm first builds the list of maxima candidates. Only local maxima above the threshold are considered as valid maxima candidates.
2. Then each maximum candidate is processed to find its neighborhood area, starting from the highest. For each candidate, a propagation is performed on its neighbours as long as the neighbour value is within the tolerance of the maximum candidate. If another candidate is encountered as a neighbour, then that candidate is removed, as it has been absorbed by the current maximum candidate.
:param float threshold: local maxima below this value are ignored
:param float tolerance: ignore local maxima if they are in the neighborhood of another maximum. The neighborhood of a maximum is defined by the area that propagates until the difference between the pixel value and the value of the considered local maximum exceeds tolerance. The higher the tolerance, the more local maxima are removed.
"""
self.threshold = threshold
self.tolerance = tolerance
def find_maxima(self, correlation_image):
ie = IImageEngine.get_instance()
matches = ie.find_maxima(correlation_image, self.threshold, self.tolerance)
return matches
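A short usage sketch (hedged: the threshold and tolerance values are the ones used in the updated test below, and correlation_image is assumed to be the output of IImageEngine.match_template):

# the typical value of peaks is -500 and the value between peaks is below -2500
maxima_finder = MaximaFinder(threshold=-1500.0, tolerance=1500)
matches = maxima_finder.find_maxima(correlation_image)
for match in matches:
    print(match)  # Match.__str__ prints "(x, y) - correlation"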

View File

@ -138,7 +138,7 @@ class WhiteEstimator(object):
# modify spurious pixels on the side of the images
try:
IImageEngine.get_instance().replace_border(white_estimate, 3)
except NotImplementedError as error:
except NotImplementedError as error: # pylint: disable=unused-variable
print('warning: replace_outer_frame is not implemented yet')
self._remove_particles(white_estimate)
@ -175,15 +175,18 @@ def correct_non_uniform_lighting(non_uniform_sequence, channel_id, white_estimat
:param WhiteEstimator white_estimator: the method used to estimate the light image
:return IHyperStack: the hyperstack of images after correction
"""
ie = IImageEngine.get_instance()
white_estimate = white_estimator.estimate_white([non_uniform_sequence], [channel_id])
uniform_stack = IImageEngine.get_instance().create_hyperstack(width=white_estimate.width(), height=white_estimate.height(), num_slices=1, num_frames=non_uniform_sequence.num_frames, num_channels=1, pixel_type=PixelType.F32)
uniform_stack = ie.create_hyperstack(width=white_estimate.width(), height=white_estimate.height(), num_slices=1, num_frames=non_uniform_sequence.num_frames, num_channels=1, pixel_type=PixelType.F32)
for frame_index in range(non_uniform_sequence.num_frames):
non_uniform_frame = IImageEngine.get_instance().load_image(non_uniform_sequence.get_image_file_path(channel_index=0, frame_index=frame_index, slice_index=0))
uniform_frame = IImageEngine.get_instance().divide(non_uniform_frame, white_estimate)
non_uniform_frame = ie.load_image(non_uniform_sequence.get_image_file_path(channel_index=0, frame_index=frame_index, slice_index=0))
uniform_frame = ie.divide(non_uniform_frame, white_estimate)
uniform_stack.set_image(frame_index=frame_index, image=uniform_frame)
return uniform_stack
# def get_sequence_as_hyperstack(sequence, selected_channel_ids=None, selected_frames=None, selected_slices=None):
# """ returns a subset of the sequence as an hyperstack
# :param IHyperStack non_uniform_sequence: the input sequence

View File

@ -0,0 +1,28 @@
from imageengine import IImageEngine
from maxima_finder import IMaximaFinder
class TemplateMatcher(object):
def __init__(self, maxima_finder):
"""
:param IMaximaFinder maxima_finder:
"""
self.maxima_finder = maxima_finder
def match_template(self, image, pattern):
"""Remove the traps in the input sequence
:param IImage image: the image in which we want to detect the pattern
:param IImage pattern:
:return list(Match): the list of detected matches
"""
ie = IImageEngine.get_instance()
correlation_image = ie.match_template(image, pattern)
# ie.save_as_tiff(correlation_image, '/home/graffy/Desktop/similarity.tiff')
matches = self.maxima_finder.find_maxima(correlation_image)
return matches
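A hedged usage sketch, mirroring how TrapsDetector wires these classes together below (first_image and trap_image are assumed to come from a preprocessed stack, as in compute_traps_mask):

maxima_finder = MaximaFinder(threshold=-1500.0, tolerance=1500)
matcher = TemplateMatcher(maxima_finder)
matches = matcher.match_template(first_image, trap_image)
print("number of matches found : %d" % len(matches))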

View File

@ -0,0 +1,45 @@
from catalog import ImageCatalog, Sequence
# from imageengine import Toto
from imageengine import IImageEngine
from imageengine import PixelType
from preprocessing import WhiteEstimator, correct_non_uniform_lighting
from template_matcher import TemplateMatcher
class TrapsDetector(object):
def __init__(self, template_matcher):
"""
:param TemplateMatcher template_matcher:
"""
self.template_matcher = template_matcher
def compute_traps_mask(self, sequence, channel_id, trap_aabb):
"""Remove the traps in the input sequence
:param Sequence sequence:
:param str channel_id:
:param Aabb trap_aabb:
:return Image: the mask of traps (binary image containing 0)
the idea of this method is as follows:
1. allow the user to provide an axis-aligned bounding box that contains a trap, and use this area as a pattern to be searched in all images of the sequence. Provided the tolerance is big enough, this should find all traps in the sequence.
2. compute a clean (without globules) trap, by computing for each pixel, the median value from all traps for that pixel.
3. For each trap location, remove the trap by subtracting the clean trap. This should leave the globules only.
At the end of this image processing, we expect to only have the globules in the image.
"""
uniform_stack = correct_non_uniform_lighting(sequence, channel_id, white_estimator=WhiteEstimator(open_size=75, close_size=75, average_size=75))
first_image = uniform_stack.get_image(frame_index=0)
trap_image = first_image.get_subimage(trap_aabb)
# ie.save_as_tiff(trap_image, '/home/graffy/Desktop/template.tiff')
matches = self.template_matcher.match_template(first_image, trap_image)
#non_uniform_stack = sequence.as_stack()
#uniform_stack = IImageEngine.get_instance().divide(non_uniform_stack, white_estimate)
# IImageEngine.get_instance().save_as_tiff(white_estimate, './white_estimate.tiff')
return matches
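Note that steps 2 and 3 of the docstring are not implemented in the body above, which currently returns the matches from step 1. A hedged sketch of what the remaining steps could look like follows; the Aabb attribute names and the median/subtract helpers are assumptions, not part of the interfaces shown in this commit:

# hypothetical continuation of compute_traps_mask (steps 2 and 3 of the docstring)
trap_width = trap_aabb.x_max - trap_aabb.x_min   # assumes Aabb exposes its bounds
trap_height = trap_aabb.y_max - trap_aabb.y_min
trap_images = [first_image.get_subimage(Aabb(m.x, m.y, m.x + trap_width, m.y + trap_height))
               for m in matches]
# step 2: per-pixel median over all detected traps -> a globule-free "clean" trap
# ie = IImageEngine.get_instance()
# clean_trap = ie.compute_median(trap_images)              # hypothetical IImageEngine method
# step 3: subtract the clean trap at each match location, leaving only the globules
# for m in matches:
#     ie.subtract_at_location(first_image, clean_trap, m.x, m.y)  # hypothetical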

View File

@ -1,53 +0,0 @@
from catalog import ImageCatalog, Sequence
# from imageengine import Toto
from imageengine import IImageEngine
from imageengine import PixelType
from preprocessing import WhiteEstimator
def remove_traps(sequence, channel_id, trap_aabb):
"""Remove the traps in the input sequence
:param Sequence sequence:
:param str channel_id:
:param Aabb trap_aabb:
the idea of this method is as follows:
1. allow the user to provide an axis-aligned bounding box that contains a trap, and use this area as a pattern to be searched in all images of the sequence. Provided the tolerance is big enough, this should find all traps in the sequence.
2. compute a clean (without globules) trap, by computing for each pixel, the median value from all traps for that pixel.
3. For each trap location, remove the trap by subtracting the clean trap. This should leave the globules only.
At the end of this image processing, we expect to only have the globules in the image.
"""
white_estimator = WhiteEstimator(open_size=75, close_size=75, average_size=75)
white_estimate = white_estimator.estimate_white([sequence], [channel_id])
print(white_estimate)
uniform_stack = IImageEngine.get_instance().create_hyperstack(width=white_estimate.width(), height=white_estimate.height(), num_slices=1, num_frames=sequence.num_frames, num_channels=1, pixel_type=PixelType.F32)
ie = IImageEngine.get_instance()
for frame_index in range(sequence.num_frames):
non_uniform_frame = IImageEngine.get_instance().load_image(sequence.get_image_file_path(channel_index=0, frame_index=frame_index, slice_index=0))
uniform_frame = IImageEngine.get_instance().divide(non_uniform_frame, white_estimate)
uniform_stack.set_image(frame_index=frame_index, image=uniform_frame)
first_image = uniform_stack.get_image(frame_index=0)
trap_image = first_image.get_subimage(trap_aabb)
ie.save_as_tiff(trap_image, '/home/graffy/Desktop/template.tiff')
similarity_image = ie.match_template(first_image, trap_image)
ie.save_as_tiff(similarity_image, '/home/graffy/Desktop/similarity.tiff')
matches = ie.find_maxima(similarity_image)
num_traps = len(matches)
print("number of traps found : %d" % num_traps)
num_expected_traps = 13 # 13 traps are completely visible in the
assert abs(num_traps - num_expected_traps) <= 1
#non_uniform_stack = sequence.as_stack()
#uniform_stack = IImageEngine.get_instance().divide(non_uniform_stack, white_estimate)
# IImageEngine.get_instance().save_as_tiff(white_estimate, './white_estimate.tiff')

View File

@ -10,7 +10,9 @@ import sys
from lipase.imageengine import IImageEngine, PixelType, Aabb
from lipase.imagej.ijimageengine import IJImageEngine, IJImage
from lipase.preprocessing import WhiteEstimator, correct_non_uniform_lighting
from lipase.traps_remover import remove_traps
from lipase.maxima_finder import MaximaFinder
from lipase.template_matcher import TemplateMatcher
from lipase.traps_detector import TrapsDetector
from lipase.catalog import ImageCatalog, Sequence
class TestLipase(unittest.TestCase):
@ -43,14 +45,25 @@ class TestLipase(unittest.TestCase):
non_uniform_sequence = self.catalog.sequences['res_soleil2018/GGH/GGH_2018_cin2_phiG_I_327_vis_-40_1/Pos0']
uniform_sequence = correct_non_uniform_lighting(non_uniform_sequence, 'DM300_nofilter_vis', white_estimator=WhiteEstimator(open_size=75, close_size=75, average_size=75))
def test_traps_removal(self):
def test_traps_detector(self):
# the typical value of peaks is -500 and the value between peaks is below -2500
threshold = -1500.0
tolerance = 1500
maxima_finder = MaximaFinder(threshold, tolerance)
template_matcher = TemplateMatcher(maxima_finder)
traps_detector = TrapsDetector(template_matcher)
sequence = self.catalog.sequences['res_soleil2018/GGH/GGH_2018_cin2_phiG_I_327_vis_-40_1/Pos0']
x_min = 423
x_max = 553
y_min = 419
y_max = 533
trap_aabb = Aabb(x_min, y_min, x_max, y_max)
remove_traps(sequence, 'DM300_nofilter_vis', trap_aabb)
matches = traps_detector.compute_traps_mask(sequence, 'DM300_nofilter_vis', trap_aabb)
num_traps = len(matches)
print("number of traps found : %d" % num_traps)
num_expected_traps = 13 # 13 traps are completely visible in the
self.assertAlmostEqual(len(matches), num_expected_traps, delta=1.0)
def run_script():