Some general info:
Operating system: Windows 10
Slicer version: 5.3.0 - 2023.06.17
Expected behavior: 3D stack(s) imported from a CZI scene with minimum hassle for the user…
Actual behavior: Work in progress, 3D view requires manual adjustments, not fully integrated with Slicer yet.
Hello,
this is my first post here. I am trying to import a multichannel CZI confocal stack into 3D Slicer, and I found some Python code on GitHub for a napari reader, which I am repurposing for Slicer.
I’m still very far from having a working reader, but the code I have put together is showing some promise:
As I’m not quite sure what I’m able to attach to my first post, here is the code in its entirety:
Click to expand code (czireader.py)
Called as:
In the console, I added my working path with a sys.path.append()
and then:
fn = 'an_image.czi'
czireader.reader(fn)
For some reason, I was not able to do this from the Jupyter plugin:
File C:\PortableApps\Slicer 5.3.0-2023-06-17\bin\Python\sitkUtils.py:21, in PushVolumeToSlicer(sitkimage, targetNode, name, className)
19 # Create new node if needed
20 if not targetNode:
—> 21 targetNode = slicer.mrmlScene.AddNewNodeByClass(className, slicer.mrmlScene.GetUniqueNameByString(name))
22 targetNode.CreateDefaultDisplayNodes()
24 myNodeFullITKAddress = GetSlicerITKReadWriteAddress(targetNode)

AttributeError: module 'slicer' has no attribute 'mrmlScene'
The code so far:
# Original code lifted almost entirely from:
# https://github.com/BodenmillerGroup/napari-czifile2/blob/main/napari_czifile2/_reader.py
# https://github.com/BodenmillerGroup/napari-czifile2/blob/main/napari_czifile2/io.py
# All credit goes to @jwindhager https://github.com/jwindhager
import SimpleITK as sitk
import sitkUtils
import slicer
from multiprocessing import cpu_count
from pathlib import Path
from typing import Iterable, List, Optional, Union
from xml.etree import ElementTree
import numpy as np
from czifile import CziFile, DimensionEntryDV1, DirectoryEntryDV
from tifffile import lazyattr
class CZISceneFile(CziFile):
    """A czifile.CziFile restricted to a single scene ("S" axis index).

    Subblocks belonging to other scenes are filtered out, so position,
    scale and pixel-data accessors all refer to the selected scene only.
    Lifted almost entirely from napari-czifile2 (see header links).
    """

    @staticmethod
    def get_num_scenes(path: Union[str, Path], *args, **kwargs) -> int:
        """Return the number of scenes in the CZI file at *path*.

        Reads the length of the "S" axis; a file without an "S" axis is
        treated as containing exactly one scene.
        """
        with CziFile(path, *args, **kwargs) as czi_file:
            if "S" in czi_file.axes:
                return czi_file.shape[czi_file.axes.index("S")]
            return 1

    def __init__(self, path: Union[str, Path], scene_index: int, *args, **kwargs):
        """Open the CZI file at *path* and select scene *scene_index*."""
        super(CZISceneFile, self).__init__(str(path), *args, **kwargs)
        # Remembered index used by filtered_subblock_directory to drop
        # subblocks that belong to other scenes.
        self.scene_index = scene_index

    @lazyattr
    def pos_x_um(self) -> float:
        # Scene origin along X in micrometers: smallest subblock start
        # (in pixels) times the pixel size. Defaults to 0.0 when no "X"
        # dimension entries exist.
        return self.scale_x_um * min(
            (dim_entry.start for dim_entry in self._iter_dim_entries("X")), default=0.0
        )

    @lazyattr
    def pos_y_um(self) -> float:
        # Scene origin along Y in micrometers (see pos_x_um).
        return self.scale_y_um * min(
            (dim_entry.start for dim_entry in self._iter_dim_entries("Y")), default=0.0
        )

    @lazyattr
    def pos_z_um(self) -> float:
        # Scene origin along Z in micrometers (see pos_x_um).
        return self.scale_z_um * min(
            (dim_entry.start for dim_entry in self._iter_dim_entries("Z")), default=0.0
        )

    @lazyattr
    def pos_t_seconds(self) -> float:
        # Acquisition start time offset in seconds (see pos_x_um).
        return self.scale_t_seconds * min(
            (dim_entry.start for dim_entry in self._iter_dim_entries("T")), default=0.0
        )

    @lazyattr
    def scale_x_um(self) -> float:
        # Pixel size along X in micrometers. The 10**6 multiplier assumes
        # the metadata Distance value is stored in meters — TODO confirm
        # against the CZI specification.
        return self._get_scale("X", multiplier=10.0**6)

    @lazyattr
    def scale_y_um(self) -> float:
        # Pixel size along Y in micrometers (see scale_x_um).
        return self._get_scale("Y", multiplier=10.0**6)

    @lazyattr
    def scale_z_um(self) -> float:
        # Slice spacing along Z in micrometers (see scale_x_um).
        return self._get_scale("Z", multiplier=10.0**6)

    @lazyattr
    def scale_t_seconds(self) -> float:
        # Time-step scale; no multiplier, presumably already in seconds —
        # TODO confirm.
        return self._get_scale("T")

    @lazyattr
    def channel_names(self) -> Optional[List[str]]:
        """Channel names from the XML metadata, or None.

        Returns None when there is no "C" axis or when the number of
        <Channel> elements does not match the length of the "C" axis.
        Falls back to a channel's "Id" attribute when "Name" is absent.
        """
        if "C" in self.axes:
            channel_elements = self._metadata_xml.findall(
                ".//Metadata/Information/Image/Dimensions/Channels/Channel"
            )
            if len(channel_elements) == self.shape[self.axes.index("C")]:
                return [c.attrib.get("Name", c.attrib["Id"]) for c in channel_elements]
        return None

    @lazyattr
    def is_rgb(self) -> bool:
        # True when the sample axis "0" holds more than one sample per
        # pixel (i.e. interleaved color data rather than scalar channels).
        return "0" in self.axes and self.shape[self.axes.index("0")] > 1

    def as_tzcyx0_array(self, *args, **kwargs) -> np.ndarray:
        """Return the scene's pixel data with axes ordered (T, Z, C, Y, X, 0).

        Missing axes among T/Z/C/0 are inserted as singleton dimensions;
        Y and X must exist in the file. Extra arguments are forwarded to
        CziFile.asarray (e.g. max_workers).
        """
        data = self.asarray(*args, **kwargs)
        # Build the permutation that maps the file's native axis order to
        # T, Z, C, Y, X, 0, appending a new singleton axis whenever one of
        # the optional dimensions is absent.
        tzcyx0_axis_indices = []
        if "T" in self.axes:
            tzcyx0_axis_indices.append(self.axes.index("T"))
        else:
            tzcyx0_axis_indices.append(data.ndim)
            data = np.expand_dims(data, -1)
        if "Z" in self.axes:
            tzcyx0_axis_indices.append(self.axes.index("Z"))
        else:
            tzcyx0_axis_indices.append(data.ndim)
            data = np.expand_dims(data, -1)
        if "C" in self.axes:
            tzcyx0_axis_indices.append(self.axes.index("C"))
        else:
            tzcyx0_axis_indices.append(data.ndim)
            data = np.expand_dims(data, -1)
        tzcyx0_axis_indices.append(self.axes.index("Y"))
        tzcyx0_axis_indices.append(self.axes.index("X"))
        if "0" in self.axes:
            tzcyx0_axis_indices.append(self.axes.index("0"))
        else:
            tzcyx0_axis_indices.append(data.ndim)
            data = np.expand_dims(data, -1)
        # Any remaining native axes (e.g. a scene axis) are moved to the
        # back of the permutation...
        for axis_index in range(len(self.axes)):
            if axis_index not in tzcyx0_axis_indices:
                tzcyx0_axis_indices.append(axis_index)
        data = data.transpose(tzcyx0_axis_indices)
        # ...and then truncated away. NOTE(review): this shape assignment
        # only succeeds if every trailing axis has length 1 — presumably
        # guaranteed by the scene filtering; confirm for multi-scene files.
        data.shape = data.shape[:6]
        return data

    def _iter_dim_entries(self, dimension: str) -> Iterable[DimensionEntryDV1]:
        # Yield every dimension entry named *dimension* across the
        # scene-filtered subblock directory.
        for dir_entry in self.filtered_subblock_directory:
            for dim_entry in dir_entry.dimension_entries:
                if dim_entry.dimension == dimension:
                    yield dim_entry

    def _get_scale(self, dimension: str, multiplier: float = 1.0):
        # Look up the scaling value for *dimension* in the XML metadata
        # and convert it with *multiplier*; fall back to 1.0 when the
        # element is missing or non-positive.
        scale_element = self._metadata_xml.find(
            f'.//Metadata/Scaling/Items/Distance[@Id="{dimension}"]/Value'
        )
        if scale_element is not None:
            scale = float(scale_element.text)
            if scale > 0:
                return scale * multiplier
        return 1.0

    @lazyattr
    def _metadata_xml(self) -> ElementTree.Element:
        # Parsed, cached XML metadata of the whole file.
        return ElementTree.fromstring(self.metadata())

    @lazyattr
    def filtered_subblock_directory(self) -> List[DirectoryEntryDV]:
        # Subblock directory restricted to entries belonging to this
        # scene; overrides (shadows) the CziFile attribute of the same
        # name so inherited accessors see only this scene's data.
        dir_entries = super(CZISceneFile, self).filtered_subblock_directory
        return list(
            filter(
                lambda dir_entry: self._get_scene_index(dir_entry) == self.scene_index,
                dir_entries,
            )
        )

    @staticmethod
    def _get_scene_index(dir_entry: DirectoryEntryDV) -> int:
        # Scene index of a directory entry, taken from its "S" dimension
        # entry; entries without one belong to scene 0. A subblock is
        # expected to belong to exactly one scene.
        scene_indices = {
            dim_entry.start
            for dim_entry in dir_entry.dimension_entries
            if dim_entry.dimension == "S"
        }
        if len(scene_indices) == 0:
            return 0
        assert len(scene_indices) == 1
        return scene_indices.pop()
def reader(paths):
    """Load every scene of one or more CZI files into Slicer.

    Each (timepoint, channel) combination of every non-RGB scene becomes
    one vtkMRMLScalarVolumeNode pushed into the current MRML scene.

    Parameters:
        paths: a single path (str/Path) or a list of paths to .czi files.

    Returns:
        The list of created volume nodes (previously an always-empty,
        unused list; existing callers that ignore the return value are
        unaffected).
    """
    layer_data = []
    if not isinstance(paths, list):
        paths = [paths]
    for path in paths:
        num_scenes = CZISceneFile.get_num_scenes(path)
        for scene_index in range(num_scenes):
            with CZISceneFile(path, scene_index) as f:
                if f.is_rgb:
                    # RGB (interleaved-sample) images are not handled yet.
                    continue
                data = f.as_tzcyx0_array(max_workers=cpu_count())
                # https://github.com/BodenmillerGroup/napari-czifile2/issues/5
                # contrast_limits is not applied yet; kept for when display
                # properties are set on the volume's display node.
                contrast_limits = None
                if data.dtype == np.uint16:
                    contrast_limits = (0, 65535)
                # https://github.com/napari/napari/issues/2348
                # Drop the singleton sample axis -> (T, Z, C, Y, X).
                data = data[:, :, :, :, :, 0]
                shape = data.shape
                # FIX: Slicer geometry is in millimetres, while the CZI
                # metadata accessors return micrometers — convert, so the
                # imported volume is not 1000x too large.
                um_to_mm = 1.0e-3
                origin_mm = (f.pos_x_um * um_to_mm,
                             f.pos_y_um * um_to_mm,
                             f.pos_z_um * um_to_mm)
                spacing_mm = (f.scale_x_um * um_to_mm,
                              f.scale_y_um * um_to_mm,
                              f.scale_z_um * um_to_mm)
                channel_names = f.channel_names  # may legitimately be None
                for t in range(shape[0]):
                    for c in range(shape[2]):
                        # Load the volumes one (t, c) slab at a time.
                        image_sitk = sitk.GetImageFromArray(data[t, :, c, :, :])
                        image_sitk.SetOrigin(origin_mm)
                        image_sitk.SetSpacing(spacing_mm)
                        volumeNode = sitkUtils.PushVolumeToSlicer(image_sitk, None)
                        # FIX: channel_names can be None (metadata mismatch
                        # or no "C" axis) — fall back to a generic name
                        # instead of crashing; suffix the timepoint so
                        # multi-timepoint imports do not collide.
                        if channel_names is not None:
                            name = channel_names[c]
                        else:
                            name = f"channel_{c}"
                        if shape[0] > 1:
                            name = f"{name}_t{t}"
                        volumeNode.SetName(name)
                        layer_data.append(volumeNode)
                        slicer.util.setSliceViewerLayers(background=volumeNode, fit=True)
    return layer_data
So, as you can see, I’m still struggling with really basic things such as:
- reading colors for each channel in the CZI file, and setting them for each of the imported volumes
- maybe some kind of GUI interface for the user to select which scene to import (there may be multiple scenes and timepoints, not sure how to handle those nicely yet)
- units: The code I lifted from @jwindhager conveniently provides the pixel size in microns, but Slicer interprets those values as millimetres (I need to check whether Slicer's length unit is always mm, or whether the unit can be configured).
- Follow the help provided in the Slicer help to turn the code into a reader plugin
- A few manual operations required to actually display the volume, maybe I can set more things in the reader to avoid the manual steps.
- How big can the images be? For now I’m not sure…
- RGB images? For now I’m only interested in confocal stacks, but maybe histology slides could also be handled through a potential future reader. For now I’m not going to worry too much about this.
And what would be the end goal?
After I finish writing this plugin, the two main image processing tasks I will need to perform are:
- 3-D registration of multiple acquired stacks using the DAPI nuclei as landmarks
- Segmentation of each cell in 3-D if possible (cellpose?)
And in terms of data output:
- Size / shape of cells and
- Mean intensity in all the different channels (up to 6) for each cell.
That’s it, thank you for reading and any advice you might have will be greatly appreciated!
Kind regards,
Egor