I am working on a particle detection and tracking Python script that I will eventually use for deep learning. Currently, I believe I am having issues with tracks duplicating or not being marked as finished when their particles disappear. My lab mate tried to help me troubleshoot it, which is how we identified the issue, but we were still unable to fix it. I have written the code in a verbose version so that all variables are visible in Spyder.
My current goal is to have it detect, track, and then crop out the particles, which I will use for deep learning. I know the detection is working correctly and it is tracking the particles, but the cropped tracks seem to be too large.
Any help is appreciated!!! If there is a similar post to this, a link would also be appreciated, but I did not find one.
The error:

runfile('C:/Users/bbraswe1/AST_ParticleTracking/untitled1.py', wdir='C:/Users/bbraswe1/AST_ParticleTracking')
Traceback (most recent call last):
  File "C:\Users\bbraswe1\AppData\Local\anaconda3\envs\torch\lib\site-packages\spyder_kernels\py3compat.py", line 356, in compat_exec
    exec(code, globals, locals)
  File "c:\users\bbraswe1\ast_particletracking\untitled1.py", line 176, in <module>
    cropped_image = tif_stack[frame, y - crop_size // 2:y + crop_size // 2, x - crop_size // 2:x + crop_size // 2]
IndexError: index 10 is out of bounds for axis 0 with size 10
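From what I can tell, the IndexError means at least one of my track lists ends up with more entries than the stack has frames (the error says axis 0 has size 10, i.e. my test stack is 10 frames), so frame runs off the end of the stack in the crop loop. Here is a small self-contained sketch of the failure mode as I understand it, with a fake stack standing in for my data (the 64x64 image size is made up):

import numpy as np

fake_stack = np.zeros((10, 64, 64))    # 10 frames, like the stack in the error
track = [np.array([32.0, 32.0])] * 11  # 11 points: one more than the stack has frames

for frame in range(len(track)):
    if frame >= fake_stack.shape[0]:
        # Without this guard, frame == 10 raises the exact IndexError above
        print(f"frame {frame} is past the end of the stack")
        break
    patch = fake_stack[frame, 26:38, 26:38]

A guard like this suppresses the crash, but I don't think it fixes whatever is making the tracks too long in the first place.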
The code:
import numpy as np
from skimage import io, filters
from filterpy.kalman import KalmanFilter
from scipy.spatial import cKDTree
from scipy.spatial.distance import cdist
from skimage import measure
import os
import tifffile
from pathlib import Path
import csv
import copy
# Parameters
stack_path = r"C:\Users\bbraswe1\Dropbox (ASU)\PC\Desktop\Usethis\PT_10.tif"
ct_output_folder = r"C:\Users\bbraswe1\Dropbox (ASU)\PC\Desktop\Usethis\CT"
csv_output_folder = r"C:\Users\bbraswe1\Dropbox (ASU)\PC\Desktop\Usethis\CSV\tracked_particles.csv"
object_diameter = 5
quality_threshold = 4
search_radius = 6
missing_frames = 50
shared_locations = 100
alpha = 0.2
beta = 0.8
min_duration = 1
fwhm_threshold = 5
crop_size = 12
# Load the .tif stack
tif_stack = tifffile.imread(stack_path)
# Process the .tif stack with the Difference of Gaussians (DoG) filter
sigma1 = object_diameter / np.sqrt(2)
sigma2 = object_diameter * np.sqrt(2)
dog_filtered_stack = []
for img in tif_stack:
    # Apply DoG filter
    filtered_image = filters.gaussian(img, sigma=sigma1) - filters.gaussian(img, sigma=sigma2)
    # Threshold the filtered image
    thresholded_image = filtered_image > quality_threshold * filtered_image.std()
    # Label the thresholded image
    labeled_image = measure.label(thresholded_image)
    # Extract centroid properties
    properties = measure.regionprops_table(labeled_image, properties=["centroid"])
    centroids = np.column_stack((properties["centroid-0"], properties["centroid-1"]))
    dog_filtered_stack.append(centroids)
# Initialize Kalman filter
kf = KalmanFilter(dim_x=6, dim_z=2)
kf.F = np.array([[1, 0, 1, 0, 0.5, 0],
                 [0, 1, 0, 1, 0, 0.5],
                 [0, 0, 1, 0, 1, 0],
                 [0, 0, 0, 1, 0, 1],
                 [0, 0, 0, 0, 1, 0],
                 [0, 0, 0, 0, 0, 1]])
kf.H = np.array([[1, 0, 0, 0, 0, 0],
                 [0, 1, 0, 0, 0, 0]])
kf.P *= np.eye(6) * 1e4
kf.R = np.eye(2) * 2
kf.Q[-1, -1] *= 0.01
kf.Q[4, 4] *= 0.01
# Perform particle tracking
tracks = []
active_tracks = []
# Iterate through frames and detected particles
num_frames = len(dog_filtered_stack)
frame = 0
while frame < num_frames:
    detected_particles = dog_filtered_stack[frame]
    # Initialize new tracks for the first frame
    if len(active_tracks) == 0:
        for p in detected_particles:
            new_track = {"kf": copy.deepcopy(kf), "track": [p], "last_seen": frame}
            new_track["kf"].x[:2] = p.reshape(-1, 1)
            active_tracks.append(new_track)
    else:
        # Build a k-d tree to find nearest neighbors efficiently
        tree = cKDTree([t["track"][-1] for t in active_tracks])
        # Query the tree to find matches between detected particles and active tracks
        distances, matches = tree.query(detected_particles, distance_upper_bound=search_radius)
        # Update the matched active tracks
        used_particles = set()
        i = 0
        while i < len(matches):
            m = matches[i]
            if m != len(active_tracks):
                active_tracks[m]["track"].append(detected_particles[i])
                active_tracks[m]["kf"].update(detected_particles[i])
                active_tracks[m]["last_seen"] = frame
                used_particles.add(i)
            i += 1
        # Create new tracks for unmatched particles
        i = 0
        while i < len(detected_particles):
            p = detected_particles[i]
            if i not in used_particles:
                new_track = {"kf": copy.deepcopy(kf), "track": [p], "last_seen": frame}
                new_track["kf"].x[:2] = p.reshape(-1, 1)
                active_tracks.append(new_track)
            i += 1
    # Move tracks that haven't been seen for a while to the list of finished tracks
    for t in active_tracks:
        if frame - t["last_seen"] > missing_frames:
            tracks.append(t["track"])
            active_tracks.remove(t)
        else:
            # Predict the next position of the particle
            t["kf"].predict()
            t["track"].append(t["kf"].x[:2].flatten())  # Flatten the position array before appending
    # Update the Kalman filter's velocity estimate with drift compensation
    if frame > 0:
        grid_size = 10
        grid_centers = np.mgrid[0.5 * shared_locations / grid_size:shared_locations:shared_locations / grid_size,
                                0.5 * shared_locations / grid_size:shared_locations:shared_locations / grid_size].reshape(2, -1).T
        max_centroids = max(len(centroids) for centroids in dog_filtered_stack)
        drift = np.zeros((grid_centers.shape[0], max_centroids, 2))
        drift_avg = np.zeros_like(drift)
        for frame_idx in range(1, len(dog_filtered_stack[:frame + 1])):
            for p_idx, (p1, p2) in enumerate(zip(dog_filtered_stack[frame_idx - 1], dog_filtered_stack[frame_idx])):
                region_idx = np.argmin(np.sum((grid_centers - p1) ** 2, axis=1))
                drift[region_idx, p_idx] = p2 - p1
                if frame_idx > 1:
                    drift_avg[region_idx, p_idx] = alpha * drift[region_idx, p_idx] + (1 - alpha) * drift_avg[region_idx, p_idx - 1]
        for t in active_tracks:
            region_idx = np.argmin(np.sum((grid_centers - t["track"][-1].flatten()) ** 2, axis=1))
            updated_velocity = (beta * drift_avg[region_idx].mean(axis=0).reshape(-1, 1) + (1 - beta) * t["kf"].x[2:4])
            # Check if the updated position would be out of bounds
            updated_position = t["kf"].x[:2] + updated_velocity
            if 0 <= updated_position[0, 0] < tif_stack.shape[1] and 0 <= updated_position[1, 0] < tif_stack.shape[2]:
                t["kf"].x[2:4] = updated_velocity
    frame += 1
# Add remaining active tracks to the list of finished tracks
for t in active_tracks:
    tracks.append(t["track"])
# Filter the tracks based on duration and maximum displacement between consecutive points
filtered_tracks = []
for track in tracks:
    if len(track) < min_duration:
        continue
    fwhm_ok = True
    i = 0
    while i < len(track) - 1:
        p1 = track[i]
        p2 = track[i + 1]
        if np.linalg.norm(p2 - p1) > fwhm_threshold:
            fwhm_ok = False
            break
        i += 1
    if fwhm_ok:
        filtered_tracks.append(track)
# Crop the tracks to generate image stacks centered around the particles
cropped_tracks = []
for track in filtered_tracks:
    cropped_track = []
    frame = 0
    while frame < len(track):
        position = track[frame]
        x, y = np.round(position).astype(int)
        cropped_image = tif_stack[frame, y - crop_size // 2:y + crop_size // 2, x - crop_size // 2:x + crop_size // 2]
        # Check if the cropped image has the correct shape
        if cropped_image.shape == (crop_size, crop_size):
            cropped_track.append(cropped_image)
        frame += 1
    # Ensure the track has at least one valid image before appending
    if cropped_track:
        cropped_tracks.append(cropped_track)
# Save cropped tracks to disk
Path(ct_output_folder).mkdir(parents=True, exist_ok=True)
i = 0
while i < len(cropped_tracks):
    track = cropped_tracks[i]
    track_path = os.path.join(ct_output_folder, f"particle_{i + 1}.tif")
    tifffile.imwrite(track_path, track, photometric='minisblack', metadata={'axes': 'YX'})
    i += 1
# Calculate particle intensities
intensities = []
for track in filtered_tracks:
    track_intensities = []
    frame = 0
    while frame < len(track):
        position = track[frame]
        x, y = np.round(position).astype(int)
        intensity = tif_stack[frame, y, x]
        track_intensities.append(intensity)
        frame += 1
    intensities.append(track_intensities)
# Create a CSV file with one row per tracked particle: its ID, the number of frames tracked, and its intensity in each frame
with open(csv_output_folder, "w", newline='') as csvfile:
    csvwriter = csv.writer(csvfile)
    csvwriter.writerow(["Particle", "Frames Tracked", "Intensities"])
    i = 0
    while i < len(filtered_tracks):
        track = filtered_tracks[i]
        track_intensities = intensities[i]
        frames_tracked = len(track)
        intensity_str = ", ".join([f"{intensity:.2f}" for intensity in track_intensities])
        csvwriter.writerow([i + 1, frames_tracked, intensity_str])
        i += 1
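One thing we noticed while troubleshooting but could not fix: in the loop that moves finished tracks, I call active_tracks.remove(t) while iterating over active_tracks, and I have read that mutating a list during iteration can skip elements, which might be why some tracks are never finished. Below is a sketch of the rebuild pattern I am considering instead; it uses the same dict layout as my tracks, but it is untested against my real data:

def split_finished(active_tracks, frame, missing_frames):
    """Rebuild the active list instead of calling remove() while iterating."""
    still_active, finished = [], []
    for t in active_tracks:
        if frame - t["last_seen"] > missing_frames:
            finished.append(t["track"])  # equivalent to tracks.append(t["track"])
        else:
            t["kf"].predict()  # keep the predict step for surviving tracks
            still_active.append(t)
    return still_active, finished

# Intended usage inside the frame loop, replacing my "for t in active_tracks" block:
# active_tracks, done = split_finished(active_tracks, frame, missing_frames)
# tracks.extend(done)

I deliberately left out the t["track"].append(t["kf"].x[:2].flatten()) line here, since I suspect appending predicted positions every frame (on top of the matched detections) is part of what makes my tracks longer than the stack, but I could be wrong about that.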
I wrote out a verbose version of the code so that all my variables are visible in Spyder. We identified the issue as possibly being the number of tracks and/or tracks being duplicated without finished tracks being removed (but I could be wrong). The goal here is to detect the frame in which a particle first appears, track it (with a leniency of 5 gap frames) until the particle disappears, and send that track to a finished-track list, which will then be used to crop that particle out of all the frames it is found in (in sequential order).
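For the cropping goal specifically, I think each track also needs to remember which frame it started in, because my crop loop currently assumes every track begins at frame 0 of the stack. Here is a rough sketch of what I mean, using a hypothetical start_frame value that my code does not record yet:

import numpy as np

def crop_track(tif_stack, track, start_frame, crop_size=12):
    """Crop one particle out of each stack frame it appears in, in order.

    start_frame is hypothetical: it would be recorded when the track is
    created, so point i of the track maps to stack frame start_frame + i.
    """
    half = crop_size // 2
    cropped = []
    for i, position in enumerate(track):
        frame = start_frame + i
        if frame >= tif_stack.shape[0]:
            break  # the track ran past the end of the stack
        x, y = np.round(position).astype(int)
        patch = tif_stack[frame, y - half:y + half, x - half:x + half]
        if patch.shape == (crop_size, crop_size):  # skip crops clipped at the border
            cropped.append(patch)
    return cropped

My idea is to add "start_frame": frame to new_track at both places where tracks are created, then call crop_track(tif_stack, t["track"], t["start_frame"]) for each finished track, but I have not tried this yet.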