added a method to compute the median image of a sequence, in order to estimate the background

also refactored the code to ease reading

note: these changes were made by me on 18/06/2019 and are still a work in progress...
Guillaume Raffy 2019-07-16 11:11:13 +02:00
parent 874755ad2f
commit 22d272733b
1 changed file with 69 additions and 20 deletions
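
For context, a minimal standalone sketch (not part of this commit) of the idea behind the new median-image method, assuming ImageJ/Fiji with the Jython scripting engine; 'timelapse.tif' is a hypothetical multi-frame stack used only for illustration:

from ij import IJ
from ij.plugin import ZProjector

# open a stack with one slice per time frame
stack = IJ.openImage('timelapse.tif')
# per-pixel median over the frames: moving objects vanish and only
# static structures (e.g. the traps) remain, giving a background estimate
background = ZProjector.run(stack, 'median')
background.show()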

@@ -7,8 +7,11 @@
# the 'greeting' output parameter, based on its type.
from ij import IJ # pylint: disable=import-error
from ij import ImagePlus # pylint: disable=import-error
from ij.process import ByteProcessor # pylint: disable=import-error
from ij.process import ImageStatistics
from ij.plugin import ImageCalculator
from ij.plugin import ZProjector
from ij import WindowManager
# greeting = "Hello, " + name + "!"
@@ -120,6 +123,7 @@ class Sequence(object):
:return Sequence:
'''
# assert fixme : res_soleil2018/white/white_24112018_2/Pos0 is visible
seqid_to_white = {
'res_soleil2018/GGH/GGH_2018_cin2_phiG_I_327_vis_-40_1/Pos0': 'res_soleil2018/white/white_24112018_2/Pos0',
'res_soleil2018/GGH/GGH_2018_cin2_phiG_I_327_vis_-40_1/Pos2': 'res_soleil2018/white/white_24112018_2/Pos0',
@@ -127,8 +131,7 @@ class Sequence(object):
white_sequence = seqid_to_white[self.sequence_id]
return self.catalog.sequences[white_sequence]
def open_in_imagej(self):
# ip = IJ.createHyperStack(title=self.sequence_id, width=self.width, height= self.height, channels=1, slices=1, frames=self.get_num_frames(), bitdepth=16)
def as_hyperstack(self):
hyperstack = IJ.createHyperStack(self.sequence_id, self.width, self.height, self.num_channels, self.num_slices, self.num_frames, self.num_bits_per_pixels)
for channel_index in range(self.num_channels):
for frame_index in range(self.num_frames):
@@ -139,6 +142,27 @@ class Sequence(object):
# print(src_image.getProperties())
hyperstack.setPositionWithoutUpdate(channel_index + 1, slice_index + 1, frame_index + 1)
hyperstack.setProcessor(src_image.getProcessor())
return hyperstack
def as_stack(self, channel_id):
'''
:param str channel_id: eg 'DM300_327-353_fluo'
'''
channel_index = self.get_channel_index(channel_id)
hyperstack = IJ.createHyperStack(self.sequence_id, self.width, self.height, 1, self.num_slices, self.num_frames, self.num_bits_per_pixels)
for frame_index in range(self.num_frames):
slice_index = 0
src_image_file_path = self.get_image_file_path(channel_index=channel_index, frame_index=frame_index)
# print(src_image_file_path)
src_image = IJ.openImage(src_image_file_path)
# print(src_image.getProperties())
hyperstack.setPositionWithoutUpdate(1, slice_index + 1, frame_index + 1)  # the destination stack only has a single channel
hyperstack.setProcessor(src_image.getProcessor())
return hyperstack
def open_in_imagej(self):
# ip = IJ.createHyperStack(title=self.sequence_id, width=self.width, height= self.height, channels=1, slices=1, frames=self.get_num_frames(), bitdepth=16)
hyperstack = self.as_hyperstack()
hyperstack.show()
for channel_index in range(self.num_channels):
hyperstack.setPositionWithoutUpdate(channel_index + 1, 1, 1)
@@ -204,6 +228,7 @@ class ImageLogger(IImageProcessingDebugger):
IImageProcessingDebugger.__init__(self)
def on_image(self, image, image_id):
# todo : WindowManager
image.show()
@@ -242,7 +267,6 @@ class Lipase(object):
# print(stats.pixelCount)
return stats.mean
def test_get_image_median_value(self):
image_file_path = '/Users/graffy/ownCloud/ipr/lipase/raw-images/res_soleil2018/GGH/GGH_2018_cin2_phiG_I_327_vis_-40_1/Pos0/img_000000000_DM300_327-353_fluo_000.tif'
image = IJ.openImage(image_file_path)
@@ -255,48 +279,64 @@ class Lipase(object):
:param ImagePlus src_image:
:param Sequence white_sequence:
'''
# self.debugger.on_image(src_image, 'src_image')
self.debugger.on_image(src_image, 'src_image')
white_sequence.open_in_imagej()
print('image type : ', src_image.getType())
src_median_value = self.get_image_median_value(src_image)
normalized_src = src_image.getProcessor().convertToFloat()
normalized_src.multiply(1.0 / src_median_value)
normalized_src_processor = src_image.getProcessor().convertToFloat()
normalized_src_processor.multiply(1.0 / src_median_value)
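# each white image below is normalized the same way as the source (divided by its median) so both are compared on the same scale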
for z_index in range(white_sequence.num_slices):
white_image_file_path = white_sequence.get_image_file_path(channel_index=0, frame_index=0, z_index=z_index)
white_image = IJ.openImage(white_image_file_path)
# self.debugger.on_image(white_image, 'white_image')
white_median_value = self.get_image_median_value(white_image)
# white_to_src_factor = 1.0 / float(white_median_value)
normalized_white = white_image.getProcessor().convertToFloat()
normalized_white.multiply(1.0 / float(white_median_value))
normalized_white_processor = white_image.getProcessor().convertToFloat()
normalized_white_processor.multiply(1.0 / float(white_median_value))
imp1 = ImagePlus() # IJ.openImage("http://imagej.nih.gov/ij/images/boats.gif")
imp1.setProcessor(normalized_src)
normalized_src = ImagePlus() # IJ.openImage("http://imagej.nih.gov/ij/images/boats.gif")
normalized_src.setProcessor(normalized_src_processor)
# print(imp1)
imp2 = ImagePlus() # IJ.openImage("http://imagej.nih.gov/ij/images/bridge.gif")
imp2.setProcessor(normalized_white)
normalized_white = ImagePlus() # IJ.openImage("http://imagej.nih.gov/ij/images/bridge.gif")
normalized_white.setProcessor(normalized_white_processor)
# print(imp2)
ic = ImageCalculator()
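# "sub create float" returns a new 32-bit image holding the pixel-wise difference of the two inputs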
imp3 = ic.run("sub create float", imp1, imp2)
difference_image = ic.run("sub create float", normalized_src, normalized_white)
# self.debugger.on_image(imp3, 'diff')
self.debugger.on_image(normalized_white, 'normalized_white')
self.debugger.on_image(difference_image, 'difference_image')
# imp2mat = ImagePlusMatConverter()
# white_mat = imp2mat.toMat(white_image.getProcessor())
# white_mat = imread(white_image_file_path)
# print(white_image)
sqdiff_image = ic.run("mul create float", imp3, imp3)
sqdiff_image = ic.run("mul create float", difference_image, difference_image)
mean_sqdiff = self.get_image_mean_value(sqdiff_image)
print('difference : ', mean_sqdiff)
def process_sequence(self, sequence_id):
def compute_sequence_median_image(self, sequence, channel_id='DM300_nofilter_vis'):
''' computes, for each pixel, its median value over time, so that the result is a single image showing only the static structures (things that don't move, such as the traps)
:param Sequence sequence:
:param str channel_id:
'''
:param str sequence_id:
'''
sequence = self.catalog.sequences[sequence_id]
sequence.open_in_imagej()
stack = sequence.as_stack(channel_id)
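# a median projection along the stack axis keeps only what stays constant across frames (the static structures)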
projector = ZProjector()
median_image = projector.run(stack, 'median')
# pixel_value_along_time = ByteProcessor.createProcessor(stack.getDepth(), 1)
return median_image
def extract_background(self, sequence):
channel_id = 'DM300_nofilter_vis'
sequence.as_stack(channel_id).show()
background_image = self.compute_sequence_median_image(sequence, channel_id)
background_image.show()
return background_image
def test_uniformization(self, sequence):
white_seq = sequence.get_white()
channel_index = sequence.get_channel_index('DM300_327-353_fluo')
src_image_file_path = sequence.get_image_file_path(channel_index=channel_index, frame_index=0, z_index=0)
@@ -304,6 +344,15 @@ class Lipase(object):
# src_image = IJ.openImage(src_image_file_path)
self.find_depth_index(src_image, white_seq)
def process_sequence(self, sequence_id):
'''
:param str sequence_id:
'''
sequence = self.catalog.sequences[sequence_id]
self.extract_background(sequence)
self.test_uniformization(sequence)
def run_script():
lipase = Lipase(debugger=ImageLogger())