[1]:
# -*- coding: utf-8 -*-
#  Copyright 2023
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
#   Authored by:  Franck P. Vidal (Bangor University, UK)

gVXR: a data reader for CIL#

This demo shows the result of the integration between gVirtualXray (gVXR) and the Core Imaging Library (CIL) that we developed during the CIL Training and Bring Your Own Data User Hackathon at Cambridge University. A cone-beam computed-tomography (CBCT) acquisition with Poisson noise is simulated with gVXR. The data is reconstructed with CIL version 23.0.1. The anthropomorphic phantom used is from the pEdiatRic dosimetRy personalized platfORm (ERROR) (https://error.upatras.gr/). P. Papadimitroulas et al., “A Review on Personalized Pediatric Dosimetry Applications Using Advanced Computational Tools,” in IEEE Transactions on Radiation and Plasma Medical Sciences, vol. 3, no. 6, pp. 607-620, Nov. 2019, doi: 10.1109/TRPMS.2018.2876562. It corresponds to a 5-year-old boy. It is available at https://gate.uca.fr/download/examples-tools.

Author: Franck Vidal

Version: 1.2, 8 Nov 2023

Aims of this session#

  • Simulate a CBCT scan acquisition using gVXR;

  • Add Poisson noise corresponding to a given number of photons per pixel; and

  • Reconstruct the CT volume using the Core Imaging Library (CIL).

In our simulation the source-to-object distance (SOD) is 1000mm, and the source-to-detector distance (SDD) is 1125mm. The beam spectrum is polychromatic. The voltage is 85 kV. The filtration is 0.1 mm of copper and 1 mm of aluminium. The energy response of the detector is considered. It mimics a 600-micron thick CsI scintillator. 15,000 photons per pixel are used. 600 projections of 512x512 pixels are taken.

Main parameters of the simulation

Main steps#

  1. Download the phantom data. Anthropomorphic data is used. It corresponds to a 5-year old boy.

  2. Extract surface meshes from the voxelised phantom.

  3. Simulate an X-ray radiograph of the virtual patient.

Corresponding radiograph

  1. Select the number of incident photons per pixel.

  2. Add the corresponding amount of Photonic noise.

X-ray projection with Poisson noise

  1. Create the flat-field images with the corresponding amount of Photonic noise.

Average flat-field image with Poisson noise

  1. Simulate a CT scan.

Scanning geometry

  1. Reconstruct the CT volume using the Core Imaging Library (CIL).

Visualisation of the reconstructed 3D volume

[2]:
%matplotlib inline

The working directory is not necessarily the path of the notebook (e.g. on my Mac). Use either:

import pathlib
root_path = str(pathlib.Path().resolve())

or

root_path = str(globals()['_dh'][0])

to locate the path of the notebook. This is useful to save output files.

[3]:
# IPython stores the directory the notebook was opened from in the hidden variable _dh
root_path = str(globals()['_dh'][0])

Create directories#

This step is needed to store some files.

[4]:
import os

def createDirectory(directory):
    """Create `directory` if it does not already exist.

    @param directory: path of the directory to create (relative or absolute)
    """
    # os.makedirs with exist_ok=True also creates missing parent directories
    # and avoids the check-then-create race of the original
    # `if not os.path.exists(...): os.mkdir(...)` pair, which additionally
    # failed with FileNotFoundError when the parent did not exist.
    os.makedirs(os.path.abspath(directory), exist_ok=True)

# Directories for the downloaded phantom, the extracted surface meshes,
# and the simulation outputs (all relative to the notebook path)
createDirectory(root_path + "/input_data")
createDirectory(root_path + "/input_data/meshes")
createDirectory(root_path + "/output")

Import packages#

See environment.yml for a Conda environment file.

[5]:
# Standard library
import glob
import zipfile
import urllib.request  # explicit submodule import: "import urllib" alone does not guarantee urllib.request is bound
import base64

# Third-party
import numpy as np
import pandas as pd

import matplotlib # To plot images
import matplotlib.pyplot as plt # Plotting

from tifffile import imread, imwrite

import SimpleITK as sitk

from IPython.display import display
from IPython.display import Image

from tqdm.contrib import tzip

from ipywidgets import interact
import ipywidgets as widgets

# Use a serif font for all Matplotlib figures
font = {'family' : 'serif',
         'size'   : 10
       }
matplotlib.rc('font', **font)

# Uncomment the line below to use LaTeX fonts
# matplotlib.rc('text', usetex=True)

# gVirtualXray (gVXR) and its helpers
from gvxrPython3 import gvxr
from gvxrPython3 import json2gvxr
from gvxrPython3.utils import visualise
from gvxrPython3.utils import plotScreenshot
from gvxrPython3.utils import interactPlotPowerLaw # Plot the X-ray image using a Power law look-up table
from gvxrPython3.utils import saveProjections # Plot the X-ray image in linear, log and power law scales
# gvxr.useLogFile()

# Local helper modules; `has_cil`, `extractSurface` and `writeSTL` are expected
# to come from these star imports -- TODO confirm which module defines each
from sitk2vtk import *
from reconstruct import *

if has_cil:
    from gvxrPython3.JSON2gVXRDataReader import *
    from cil.utilities.display import show_geometry
    from cil.utilities.jupyter import islicer
Thu Nov  9 10:42:58 2023 (WW) Spekpy is not installed, try Xpecgen instead.
Thu Nov  9 10:42:58 2023 (WW) Xpecgen is not installed either.
spekpy is not install, you won't be able to load a beam spectrum using spekpy
xpecgen is not install, you won't be able to load a beam spectrum using xpecgen
SimpleGVXR 2.0.7 (2023-11-06T13:48:17) [Compiler: GNU g++] on Linux
gVirtualXRay core library (gvxr) 2.0.7 (2023-11-06T13:48:16) [Compiler: GNU g++] on Linux
CIL detected
Tigre detected

Initialise GVXR using our JSON file#

[6]:
json_fname = root_path + "/Noise-CBCT.json"

# Select the renderer for the current platform: EGL enables off-screen
# rendering on MS Windows and GNU/Linux; MacOS does not support EGL, so a
# regular OpenGL context is used there instead.
# platform.system() is portable and replaces the fragile
# str(os.uname()).find("Darwin") string matching (os.uname does not even
# exist on MS Windows).
import platform

if platform.system() == "Darwin":  # MacOS
    json2gvxr.initGVXR(json_fname, renderer="OPENGL")
else:                              # MS Windows and GNU/Linux
    json2gvxr.initGVXR(json_fname, renderer="EGL")
Create an OpenGL context: 800x450
Thu Nov  9 10:42:59 2023 ---- Create window (ID: -1)
Thu Nov  9 10:42:59 2023 ---- Query the number of EGL devices
Thu Nov  9 10:42:59 2023 ---- Success
Thu Nov  9 10:42:59 2023 ---- Detected 3 EGL devices.
Thu Nov  9 10:42:59 2023 ---- Print the details here of every EGL device.
Thu Nov  9 10:42:59 2023 ---- Success
Thu Nov  9 10:42:59 2023 ---- Device 1/3:
Thu Nov  9 10:42:59 2023 ----   Device Extensions: EGL_NV_device_cuda EGL_EXT_device_drm EGL_EXT_device_drm_render_node EGL_EXT_device_query_name EGL_EXT_device_persistent_id
Thu Nov  9 10:42:59 2023 ---- Device 2/3:
Thu Nov  9 10:42:59 2023 ----   Device Extensions: EGL_EXT_device_drm EGL_EXT_device_drm_render_node
Thu Nov  9 10:42:59 2023 ---- Device 3/3:
Thu Nov  9 10:42:59 2023 ----   Device Extensions: EGL_MESA_device_software EGL_EXT_device_drm_render_node
Thu Nov  9 10:42:59 2023 ---- Initialise EGL
Thu Nov  9 10:42:59 2023 ---- EGL client extensions:
Thu Nov  9 10:42:59 2023 ---- EGL_EXT_platform_base EGL_EXT_device_base EGL_EXT_device_enumeration EGL_EXT_device_query EGL_KHR_client_get_all_proc_addresses EGL_EXT_client_extensions EGL_KHR_debug EGL_KHR_platform_x11 EGL_EXT_platform_x11 EGL_EXT_platform_device EGL_MESA_platform_surfaceless EGL_EXT_explicit_device EGL_KHR_platform_wayland EGL_EXT_platform_wayland EGL_KHR_platform_gbm EGL_MESA_platform_gbm EGL_EXT_platform_xcb
Thu Nov  9 10:42:59 2023 ---- Try to build an EGL display.
Thu Nov  9 10:42:59 2023 ---- Try to build an EGL display for Wayland platform.
Thu Nov  9 10:42:59 2023 (WW)   Success.
Thu Nov  9 10:42:59 2023 ---- EGL extensions:
Thu Nov  9 10:42:59 2023 (WW)   Retrieving EGL extensions failed
Thu Nov  9 10:42:59 2023 (WW) Failed to eglInitialize
Thu Nov  9 10:42:59 2023 (WW) display cannot be initialized
Thu Nov  9 10:42:59 2023 ---- Go through all the EGL devices to find a suitable display.
Thu Nov  9 10:42:59 2023 ---- Surfaceless have the lowest priority.
Thu Nov  9 10:42:59 2023 ---- Device 0's extensions: EGL_NV_device_cuda EGL_EXT_device_drm EGL_EXT_device_drm_render_node EGL_EXT_device_query_name EGL_EXT_device_persistent_id
Thu Nov  9 10:42:59 2023 ---- Device 1's extensions: EGL_EXT_device_drm EGL_EXT_device_drm_render_node
Thu Nov  9 10:42:59 2023 ---- Device 2's extensions: EGL_MESA_device_software EGL_EXT_device_drm_render_node
Thu Nov  9 10:42:59 2023 ---- Try to build an EGL display for Platform device 1/3.
Thu Nov  9 10:42:59 2023 (WW)   Success.
Thu Nov  9 10:42:59 2023 ---- EGL extensions:
Thu Nov  9 10:42:59 2023 (WW)   Retrieving EGL extensions failed
Thu Nov  9 10:42:59 2023 ---- EGL version: 1.5
Thu Nov  9 10:42:59 2023 ---- Bind the OpenGL API to EGL
Thu Nov  9 10:42:59 2023 ---- Create the context
Thu Nov  9 10:42:59 2023 ---- Create the surface
Thu Nov  9 10:42:59 2023 ---- Make the context current
Thu Nov  9 10:42:59 2023 ---- Initialise GLEW
Thu Nov  9 10:42:59 2023 ---- OpenGL version supported by this platform 4.5.0 NVIDIA 535.104.05
Thu Nov  9 10:42:59 2023 ---- OpenGL vendor:NVIDIA Corporation
Thu Nov  9 10:42:59 2023 ---- OpenGL renderer:NVIDIA GeForce RTX 4060 Ti/PCIe/SSE2
Thu Nov  9 10:42:59 2023 ---- OpenGL version:4.5.0 NVIDIA 535.104.05
Thu Nov  9 10:42:59 2023 ---- Use OpenGL 4.5.
Thu Nov  9 10:42:59 2023 ---- Initialise the X-ray renderer if needed and if possible

Create the output directory#

[7]:
# Resolve the folder where the simulated projections will be written
# (the path comes from the "Scan"/"OutFolder" entry of the JSON file)
raw_projection_output_dir = os.path.abspath(json2gvxr.getFilePath(json2gvxr.params["Scan"]["OutFolder"]))
print("The raw projections were saved in", raw_projection_output_dir)

# os.makedirs also creates missing parent directories, replacing the fragile
# two-step createDirectory(dir + "/..") followed by createDirectory(dir)
os.makedirs(raw_projection_output_dir, exist_ok=True)
The raw projections were saved in /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/raw_projections

Load our detector#

[8]:
# Configure the detector (position, up vector, pixels, spacing, energy response) from the JSON file
json2gvxr.initDetector()
Set up the detector
        Detector position: [0.0, -150.0, 0.0, 'mm']
        Detector up vector: [0, 0, -1]
        Number of pixels: [512, 512]
        Detector number of pixels: [512, 512]
        Energy response: input_data/energyResponseDetector.txt in MeV
        Pixel spacing: [0.9765625, 0.9765625, 'mm']
[9]:
# NumberOfPixels is stored as [columns, rows] in the JSON parameters
number_of_rows = json2gvxr.params["Detector"]["NumberOfPixels"][1]
number_of_cols = json2gvxr.params["Detector"]["NumberOfPixels"][0]

Load our source properties#

[10]:
# Configure the X-ray source position and shape from the JSON file
json2gvxr.initSourceGeometry()
Set up the beam
        Source position: [0.0, 1000.0, 0.0, 'mm']
        Source shape: PointSource
[11]:
# Load the polychromatic beam spectrum; returns the spectrum, its unit of energy, and the energy bins
spectrum, unit_of_energy, energy_set, bin_sets = json2gvxr.initSpectrum(verbose=0)
Thu Nov  9 10:43:00 2023 ---- Initialise the renderer
[12]:
# Plot the beam spectrum (photon count per energy bin)
fig, ax = plt.subplots(figsize=(10, 5))
ax.plot(energy_set, bin_sets)
ax.set_xlabel("Energy [" + unit_of_energy + "]")
ax.set_ylabel("Photon count")
ax.set_title("Corresponding spectra")
[12]:
Text(0.5, 1.0, 'Corresponding spectra')
../../_images/demos_CBCT-acquisition-with-noise-and-reconstruction_21_1.png

Step 1. Download and extract the phantom data from a ZIP file.#

[13]:
# Use the same absolute paths for the existence tests as for the download and
# extraction: the original tested the relative path "input_data/..." which only
# matched when the current working directory happened to be the notebook folder,
# causing needless re-downloads otherwise.
zip_fname = root_path + "/input_data/Pediatric phantom.zip"

if not os.path.exists(zip_fname):
    urllib.request.urlretrieve("https://drive.uca.fr/f/384a08b5f73244cf9ead/?dl=1",
                               zip_fname)

if not os.path.exists(root_path + "/input_data/Pediatric phantom"):
    with zipfile.ZipFile(zip_fname, "r") as zip_ref:
        zip_ref.extractall(root_path + "/input_data")

Step 2. Extract surface meshes from the voxelised phantom.#

Load the phantom

[14]:
# Load the voxel phantom (MetaImage .mhd format) with SimpleITK
phantom = sitk.ReadImage(root_path + "/input_data/Pediatric phantom/Pediatric_model.mhd")

Load the labels

[15]:
# Table mapping each voxel label value ("Label") to an organ name ("Organs")
df = pd.read_csv(root_path + "/input_data/labels.dat")

Process every structure of the phantom

[16]:
meshes = []

# Convert every labelled structure of the voxel phantom into an STL surface mesh
for threshold, organ in tzip(df["Label"], df["Organs"],
                         desc="Processing anatomy"):

    # Ignore air
    if organ != "Air":

        mesh_fname = root_path + "/input_data/meshes/" + organ + ".stl"
        meshes.append(mesh_fname)

        # Only create the mesh if it does not exist
        if not os.path.exists(mesh_fname):

            # Threshold the phantom
            binary_image = (phantom == threshold)

            # Smooth the binary segmentation
            smoothed_binary_image = sitk.AntiAliasBinary(binary_image)

            # Create a VTK image
            vtkimg = sitk2vtk(smoothed_binary_image, centre=True)

            # Extract the iso-surface at level 0 and save it to disk
            # (extractSurface/writeSTL presumably come from the local
            # sitk2vtk helper module -- TODO confirm)
            vtk_mesh = extractSurface(vtkimg, 0)
            writeSTL(vtk_mesh, mesh_fname)
[17]:
# Free the memory used by the voxel phantom; only the surface meshes are needed from here on
del phantom

Load our samples#

[18]:
# Load the sample meshes described in the JSON file into the gVXR scene
json2gvxr.initSamples(verbose=0)
Thu Nov  9 10:43:01 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Muscle.stl    nb_faces:       1756726 nb_vertices:    5270178 bounding_box (in cm):   (-17.9687, -10.8887, -30.9017)  (16.6016, 11.1799, 28.6986)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Bone.stl      nb_faces:       541826  nb_vertices:    1625478 bounding_box (in cm):   (-16.7969, -23.6577, -30.9017)  (15.2152, 9.88865, 16.3501)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Stomach-Interior.stl  nb_faces:       9452    nb_vertices:    28356   bounding_box (in cm):   (-1.34334, -2.38867, -17.0041)  (4.16143, 3.05231, -8.50205)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Cartilage.stl nb_faces:       163322  nb_vertices:    489966  bounding_box (in cm):   (-16.7615, -4.32288, -30.9017)  (15.5041, 8.717, 16.6771)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Brain.stl     nb_faces:       124028  nb_vertices:    372084  bounding_box (in cm):   (-7.32082, -9.98695, 16.3501)   (7.50031, 5.78681, 28.1222)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Bladder.stl   nb_faces:       3712    nb_vertices:    11136   bounding_box (in cm):   (-3.78536, 2.11808, -30.9017)   (0.175804, 5.49461, -29.7572)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Gallbladder.stl       nb_faces:       4308    nb_vertices:    12924   bounding_box (in cm):   (-5.07422, -1.68659, -17.9851)  (-2.54188, 1.49065, -14.3881)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Heart.stl     nb_faces:       48172   nb_vertices:    144516  bounding_box (in cm):   (-3.78536, -3.07617, -9.15606)  (6.32529, 5.68903, 1.30801)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Kidneys-right.stl     nb_faces:       17512   nb_vertices:    52536   bounding_box (in cm):   (-7.69363, 1.73117, -18.9661)   (-2.47349, 7.23954, -10.4641)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Kidneys-left.stl      nb_faces:       16388   nb_vertices:    49164   bounding_box (in cm):   (1.37053, 3.46679, -17.9851)    (6.44388, 7.74184, -8.82905)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Small-Intestine.stl   nb_faces:       118532  nb_vertices:    355596  bounding_box (in cm):   (-7.48809, -2.95731, -30.9017)  (7.59416, 8.32697, -12.0991)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Large-Intestine.stl   nb_faces:       94336   nb_vertices:    283008  bounding_box (in cm):   (-4.66426, -1.67902, -30.4112)  (7.11153, 6.16473, -13.4071)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Liver.stl     nb_faces:       87800   nb_vertices:    263400  bounding_box (in cm):   (-9.35286, -3.73856, -19.2931)  (5.43096, 7.83896, -6.21304)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Lung-right.stl        nb_faces:       80364   nb_vertices:    241092  bounding_box (in cm):   (-9.47265, -3.16992, -8.82905)  (0.0788746, 8.15358, 6.54004)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Lung-left.stl nb_faces:       70736   nb_vertices:    212208  bounding_box (in cm):   (0.397666, -2.26504, -9.81006)  (8.28139, 8.52371, 6.21304)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Pancreas.stl  nb_faces:       14592   nb_vertices:    43776   bounding_box (in cm):   (-2.8088, -0.240234, -17.0041)  (5.6632, 4.32215, -10.1371)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Spleen.stl    nb_faces:       25468   nb_vertices:    76404   bounding_box (in cm):   (1.48829, -0.611202, -14.7151)  (8.10404, 7.94215, -6.86704)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Stomach.stl   nb_faces:       28680   nb_vertices:    86040   bounding_box (in cm):   (-3.47804, -2.58413, -17.0041)  (5.05955, 4.0295, -7.84805)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Thymus.stl    nb_faces:       3136    nb_vertices:    9408    bounding_box (in cm):   (-0.846352, -1.87282, -1.30801) (1.53113, 1.18326, 2.28901)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Eyes-right.stl        nb_faces:       3956    nb_vertices:    11868   bounding_box (in cm):   (-3.88504, -9.01112, 14.7151)   (-1.28679, -6.41928, 17.6581)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Eyes-left.stl nb_faces:       4116    nb_vertices:    12348   bounding_box (in cm):   (1.66718, -8.8147, 14.7151)     (4.47449, -6.12631, 17.6581)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Skull.stl     nb_faces:       327028  nb_vertices:    981084  bounding_box (in cm):   (-7.59598, -10.476, 7.84805)    (7.79064, 6.17931, 29.1032)
Thu Nov  9 10:43:02 2023 ---- file_name:        /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/input_data/meshes/Trachea.stl   nb_faces:       8588    nb_vertices:    25764   bounding_box (in cm):   (-3.48031, -0.996257, -2.61602) (3.39865, 5.09486, 10.1371)
[19]:
gvxr.moveToCentre()

# Query the axis-aligned bounding box of the whole scene graph (root node and
# all of its children) in millimetres and report its centre.
ID = "root"
min_x, min_y, min_z, max_x, max_y, max_z = gvxr.getNodeAndChildrenBoundingBox(ID, "mm")
centre_x = 0.5 * (min_x + max_x)
centre_y = 0.5 * (min_y + max_y)
centre_z = 0.5 * (min_z + max_z)

print("Bounding box:", [min_x, min_y, min_z], [max_x, max_y, max_z])
print("Bounding box centre:", [centre_x, centre_y, centre_z])
Bounding box: [-172.85147094726562, -174.18807983398438, -300.0243225097656] [172.85147094726562, 174.18809509277344, 300.0243225097656]
Bounding box centre: [0.0, 7.62939453125e-06, 0.0]

Step 3. Simulate an X-ray radiograph of the virtual patient.#

We create an X-ray image projection_in_MeV. By default the image is expressed in MeV. We convert it to keV for display as follows: projection_in_keV = projection_in_MeV / gvxr.getUnitOfEnergy("keV").

[20]:
# Simulate the X-ray projection; gVXR returns the integrated energy per pixel in MeV
projection_in_MeV = np.array(gvxr.computeXRayImage(), dtype=np.single)
# Convert to keV for display
projection_in_keV = projection_in_MeV / gvxr.getUnitOfEnergy("keV")
[21]:
gvxr.setWindowSize(500, 500) # Fix for MacOS
gvxr.displayScene()
# Show a screenshot of the 3D visualisation of the simulated scene
plotScreenshot()
../../_images/demos_CBCT-acquisition-with-noise-and-reconstruction_36_0.png
[22]:
fname = root_path + "/output/visualisation.png"

# Show the interactive visualisation the first time; on later runs reuse the
# screenshot saved on disk instead of recreating the interactive plot
if not os.path.exists(fname):

    plot = visualise(use_log=True)
    plot.grid_visible = False

    plot.display()
else:
    display(Image(fname, width=800))
../../_images/demos_CBCT-acquisition-with-noise-and-reconstruction_37_0.png
[23]:
# Save a screenshot of the interactive visualisation for reuse.
# NOTE(review): `plot` is only defined when the previous cell took its first
# branch; the same `not os.path.exists(fname)` condition is re-tested here so
# the two cells stay consistent.
if not os.path.exists(fname):
    if plot is not None:

        plot.fetch_screenshot()

        # The screenshot is base64-encoded; decode it before writing the PNG
        data = base64.b64decode(plot.screenshot)
        with open(fname,'wb') as fp:
            fp.write(data)

Step 4. Select the number of incident photons per pixel#

[24]:
fig_plot = None
def chooseNumberOfPhotonsPerPixel(xray_image: np.ndarray, number_of_photons_per_pixel:int=15000, figsize=(10, 5)):

    """
    Use Matplotlib and a Jupyter widget to display the X-ray image with Poisson noise.
    The number of photons per pixel can be changed interactively with a slider;
    the chosen value is stored in the global `target_number_of_photons_per_pixel`
    and the figure in the global `fig_plot`.

    @param xray_image: The image to display
    @param number_of_photons_per_pixel: the number of photons per pixel (default: 15000)
    @param figsize: the size of the figure (default: (10, 5))
    """

    global target_number_of_photons_per_pixel, fig_plot
    target_number_of_photons_per_pixel = number_of_photons_per_pixel

    # Simulate a noisy projection for the initial slider value
    noisy_image = getNoisyImage(xray_image, number_of_photons_per_pixel)

    fig_plot, axs = plt.subplots(1, 2, figsize=figsize)
    ax_img = axs[0]

    ax_img.set_xticks([])
    ax_img.set_yticks([])

    ax_plt = axs[1]
    img = ax_img.imshow(noisy_image, cmap="gray")
    # Draw the diagonal line whose intensity profile is plotted on the right
    ax_img.plot([0, noisy_image.shape[1]], [0, noisy_image.shape[0]])
    profile, = ax_plt.plot(np.diag(noisy_image))

    # cbar = fig_plot.colorbar(img, orientation='vertical')
    title_str = "Photons per pixels: " + str(number_of_photons_per_pixel)
    ax_img.set_title(title_str)
    ax_plt.set_title("Diagonal profile")
    ax_plt.set_xlabel("Pixel position")
    ax_plt.set_ylabel("Integrated energy in MeV")
    plt.tight_layout()
    plt.margins(0,0)

    # Close the figure: it is redisplayed by the widget callback below
    plt.close()

    ## Callback function: re-simulate and redraw when the slider value changes
    def update_plot(number_of_photons_per_pixel):
        global target_number_of_photons_per_pixel
        target_number_of_photons_per_pixel = number_of_photons_per_pixel
        noisy_image = getNoisyImage(xray_image, number_of_photons_per_pixel)
        img = ax_img.imshow(noisy_image, cmap="gray")
        title_str = "Photons per pixels: " + str(number_of_photons_per_pixel)
        ax_img.set_title(title_str)
        # fig_plot.colorbar(img, cax=cbar.ax, orientation='vertical')

        profile_data = np.diag(noisy_image)
        profile.set_ydata(profile_data)
        ax_plt.set_ylim((0, profile_data.max()))

        display(fig_plot)

    interact(update_plot,
             number_of_photons_per_pixel=widgets.IntSlider(value=number_of_photons_per_pixel, min=10, max=50000, step=10, description="Photons/pixels"))

Step 5. Add the corresponding amount of Photonic noise#

[25]:
# Enable Poisson noise in the gVXR simulation from now on
gvxr.enablePoissonNoise()

def getNoisyImage(x_ray_image_energy, target_number_of_photons_per_pixel):
    """Re-simulate the X-ray projection with Poisson noise for the given photon count.

    Note: `x_ray_image_energy` is not used -- gVXR recomputes the whole
    projection internally rather than adding noise to the supplied image.

    @param x_ray_image_energy: unused
    @param target_number_of_photons_per_pixel: number of incident photons per pixel
    @return: the noisy projection as a float32 NumPy array
    """
    gvxr.setNumberOfPhotons(target_number_of_photons_per_pixel)
    return np.array(gvxr.computeXRayImage(), dtype=np.single)

You may use the slider to specify the number of photons emitted towards each pixel of the detector. It controls the amount of noise in the image. The simulated image will be very noisy if a small number is used.

[26]:
# Interactive preview of the noisy projection and its diagonal profile
chooseNumberOfPhotonsPerPixel(projection_in_MeV, number_of_photons_per_pixel=15000, figsize=(10, 5))
[27]:
# Save the figure created by chooseNumberOfPhotonsPerPixel (global fig_plot)
fig_plot.savefig(root_path + "/output/noisy-projection.png", dpi=72)
[28]:
print("Photons per pixels:", target_number_of_photons_per_pixel)
Photons per pixels: 15000

Step 6. Create the flat-field images with the corresponding amount of Photonic noise.#

Scanners, whether they are clinical or industrial, use the “flat-field correction” as a way to calibrate and correct scanners to ensure a high quality image. A lightfield is a scan image with no sample in place, and represents the maximum beam intensity for each pixel. This lightfield is then used with a darkfield; an image where the beam is off and represents the minimum intensity of each pixel.

\[corrected = \frac{raw - darkfield}{lightfield - darkfield}\]

We can use gVXR to create this lightfield image. As with all clinical scanners and many industrial scanners, this process is automatic, and result images have already been corrected. However, the user may still choose how many images are averaged to create the lightfield image. In more specialised scenarios such as synchrotrons and some lab systems, this has to be done manually. In simulations, the detector lacks any fixed-pattern noise (darkfield), however we still need to account for the beam’s photon count.

Note: flatfield correction still has to be done in order to balance out photon energy, even if no fixed-pattern noise is present!

You do not have to worry about how to apply the flat-field correction with gVXR. You may, however, use the slider below to specify the number of white images used to create the lightfield.

[29]:
# Slider to choose how many flat-field (white) images are averaged
white_slider = widgets.IntSlider(value=25, min=1, max=100, step=1, description='Number of flat images:')
white_slider
[30]:
print("Number of flat images:", white_slider.value)
# Propagate the chosen value to the scan parameters used by json2gvxr
json2gvxr.params["Scan"]["NumberOfWhiteImages"] = white_slider.value
Number of flat images: 50
[31]:
# File where the averaged flat-field image is stored
fname = root_path + "/output/flat.tif"

# Simulate the requested number of white (flat-field) images and average them
flats = [gvxr.getWhiteImage() for _ in range(white_slider.value)]
flat_field = np.average(flats, axis=0)

# Save as a float32 TIFF with lossless zlib compression
imwrite(fname, flat_field.astype(np.single), compression='zlib')
[32]:
# Total beam energy weighted by the detector energy response
# NOTE(review): not used in the rest of the visible notebook
total_energy_MeV = gvxr.getTotalEnergyWithDetectorResponse()


# Display and save the averaged flat-field image
fig = plt.figure(figsize = (10, 10))
plt.title("Average flat image\n(" + str(white_slider.value) + " images, " + str(target_number_of_photons_per_pixel) + " photons per pixel)")
img = plt.imshow(flat_field, cmap='gray')
plt.colorbar(img)
plt.tight_layout()
plt.savefig(root_path + "/output/average-flat-field.png", dpi=72)
../../_images/demos_CBCT-acquisition-with-noise-and-reconstruction_52_0.png

Step 7. Simulate a CT scan#

[33]:
# Configure the CT scan (trajectory, projections, output folder) from the JSON file
angles = json2gvxr.initScan()
Set up the CT Scan
[34]:
number_of_angles = json2gvxr.params["Scan"]["NumberOfProjections"]
# Run the acquisition: simulate one projection per angle
angles = json2gvxr.doCTScan()
[35]:
# Save the flat-field image actually used by json2gvxr during the CT scan.
# The original cell wrote the locally averaged `flat_field` (a copy-paste from
# the earlier cell), which made the `white_image` assignment dead code.
if json2gvxr.white_image is not None:
    white_image = json2gvxr.white_image
    fname = root_path + "/output/flat.tif"
    imwrite(fname, white_image.astype(np.single), compression='zlib')
[36]:
# Sanity check of the rotation: 600 angles covering just under 360 degrees
print("First angle:", angles[0])
print("Last angle:", angles[-1])
print("Number of angles:", number_of_angles)
First angle: 0.0
Last angle: 359.4
Number of angles: 600

Step 8. Reconstruct the CT volume using the Core Imaging Library (CIL)#

[37]:
# Read the simulated projections through the gVXR data reader and reconstruct
# the volume with CIL (cone-beam FDK; see the log below for the options used)
reader = JSON2gVXRDataReader(file_name=json_fname)
data = reader.read()
reconstruction = reconstruct(data, "CONEBEAM", False, verbose=1)
[512, 512]
[0.9765625, 0.9765625]
Source shape: CONEBEAM
Use CIL
data.geometry 3D Cone-beam tomography
System configuration:
        Source position: [    0., -1000.,     0.]
        Rotation axis position: [0., 0., 0.]
        Rotation axis direction: [ 0.,  0., -1.]
        Detector position: [  0., 150.,   0.]
        Detector direction x: [-1.,  0.,  0.]
        Detector direction y: [ 0.,  0., -1.]
Panel configuration:
        Number of pixels: [512 512]
        Pixel size: [0.9765625 0.9765625]
        Pixel origin: bottom-left
Channel configuration:
        Number of channels: 1
Acquisition description:
        Number of positions: 600
        Angles 0-20 in degrees:
[ 0.       ,  0.6010017,  1.2020034,  1.803005 ,  2.4040067,  3.0050085,
  3.60601  ,  4.2070117,  4.8080134,  5.409015 ,  6.010017 ,  6.611018 ,
  7.21202  ,  7.8130217,  8.414023 ,  9.015025 ,  9.616027 , 10.217029 ,
 10.81803  , 11.419032 ]
Distances in units: units distance
Cone beam detected
Backend: Tigre
Use plugin directly:  False
FDK recon

Input Data:
        angle: 600
        vertical: 512
        horizontal: 512

Reconstruction Volume:
        vertical: 512
        horizontal_y: 512
        horizontal_x: 512

Reconstruction Options:
        Backend: tigre
        Filter: ram-lak
        Filter cut-off frequency: 1.0
        FFT order: 10
        Filter_inplace: False

[38]:
# Display the acquisition geometry and save the figure.
# Save into the existing "output" directory: the original path
# root_path + "/Noise-CBCT/CT-geometry.png" pointed to a directory that was
# never created, which made the save fail ("Unable to save image").
if has_cil:
    fig = show_geometry(data.geometry)
    fig.save(root_path + "/output/CT-geometry.png", dpi=72)
../../_images/demos_CBCT-acquisition-with-noise-and-reconstruction_60_0.png
Unable to save image
<Figure size 640x480 with 0 Axes>
[39]:
# Interactively browse slices of the reconstructed volume along the 'vertical' axis
islicer(reconstruction, direction='vertical')
[40]:
# Interactively browse slices along the 'horizontal_x' axis
islicer(reconstruction, direction='horizontal_x')
[41]:
# Convert the CIL data container to a plain NumPy array for plotting and saving
reconstruction_as_array = reconstruction.as_array()
[42]:
# Display three orthogonal cross-sections of the reconstructed volume
fig, axes = plt.subplots(1, 3, figsize = (15, 7))
plt.suptitle("CBCT reconstruction with noise and polychromatism")

shape = reconstruction_as_array.shape
slices = [
    reconstruction_as_array[int(shape[0] * 0.5), :, :],
    reconstruction_as_array[:, int(shape[1] * 0.25), :],
    reconstruction_as_array[:, :, int(shape[2] * 0.5)],
]
for axis, cross_section in zip(axes, slices):
    axis.imshow(cross_section, cmap='gray')

plt.subplots_adjust(top=1.25)
plt.savefig(root_path  + "/output/plotCT.png", dpi=72)
plt.show()
../../_images/demos_CBCT-acquisition-with-noise-and-reconstruction_64_0.png

Save the volume in a file using SimpleITK#

[43]:
# Output file for the reconstructed volume (linear attenuation coefficients, hence "mu")
fname = root_path  + "/output/CT_in_mu.mha"
[44]:
# Voxel spacing derived from the detector pixel size.
# NOTE(review): this uses the detector pixel pitch directly; for a cone-beam
# geometry the reconstructed voxel size is usually the pixel pitch divided by
# the magnification (SDD/SOD) -- confirm which convention `reconstruct` uses.
detector_size = np.array(gvxr.getDetectorSize("mm"))
number_of_pixels = np.array(gvxr.getDetectorNumberOfPixels())
spacing = detector_size / number_of_pixels

print("CT volume saved in", fname)
# SimpleITK expects the spacing in (x, y, z) order
sitk_image = sitk.GetImageFromArray(reconstruction_as_array)
sitk_image.SetSpacing([spacing[0], spacing[0], spacing[1]])
sitk.WriteImage(sitk_image, fname, useCompression=True)
CT volume saved in /home/fpvidal/PROGRAMMING/GitHub/CIL-User-Showcase/006_gVXR2CIL/output/CT_in_mu.mha

Cleaning up#

Once we have finished it is good practice to clean up the OpenGL contexts and windows with the following command.

[45]:
# Destroy the OpenGL contexts and windows created by gVXR
gvxr.terminate()
Thu Nov  9 10:44:28 2023 ---- Destroy all the windows
Thu Nov  9 10:44:28 2023 ---- Destroy window 0(0x56254dcf3470)