Skip to content

Commit 14b7af0

Browse files
committed
Add scipy solver, move the main functionality to data
1 parent 11a1399 commit 14b7af0

File tree

5 files changed

+1065
-275
lines changed

5 files changed

+1065
-275
lines changed

monai/data/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -150,3 +150,5 @@ def reduce_meta_tensor(meta_tensor):
150150
return _rebuild_meta, (type(meta_tensor), storage, dtype, metadata)
151151

152152
ForkingPickler.register(MetaTensor, reduce_meta_tensor)
153+
154+
from .ultrasound_confidence_map import UltrasoundConfidenceMap
Lines changed: 376 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,376 @@
1+
# Copyright (c) MONAI Consortium
2+
# Licensed under the Apache License, Version 2.0 (the "License");
3+
# you may not use this file except in compliance with the License.
4+
# You may obtain a copy of the License at
5+
# http://www.apache.org/licenses/LICENSE-2.0
6+
# Unless required by applicable law or agreed to in writing, software
7+
# distributed under the License is distributed on an "AS IS" BASIS,
8+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9+
# See the License for the specific language governing permissions and
10+
# limitations under the License.
11+
12+
from __future__ import annotations
13+
14+
from typing import Literal, Optional, Tuple
15+
16+
import numpy as np
17+
18+
from monai.utils import min_version, optional_import
19+
20+
__all__ = ["UltrasoundConfidenceMap"]
21+
22+
cv2, _ = optional_import("cv2")
23+
Oct2Py, _ = optional_import("oct2py", "5.6.0", min_version, "Oct2Py")
24+
csc_matrix, _ = optional_import("scipy.sparse", "1.7.1", min_version, "csc_matrix")
25+
spsolve, _ = optional_import("scipy.sparse.linalg", "1.7.1", min_version, "spsolve")
26+
hilbert, _ = optional_import("scipy.signal", "1.7.1", min_version, "hilbert")
27+
28+
29+
class UltrasoundConfidenceMap:
    """Compute a confidence map from an ultrasound image.

    This transform uses the method introduced by Karamalis et al. in
    https://doi.org/10.1016/j.media.2012.07.005.
    It generates a confidence map by setting source and sink points in the image and
    computing the probability for random walks to reach the source for each pixel.

    Args:
        alpha (float, optional): Alpha parameter. Defaults to 2.0.
        beta (float, optional): Beta parameter. Defaults to 90.0.
        gamma (float, optional): Gamma parameter. Defaults to 0.05.
        mode (str, optional): 'RF' or 'B' mode data. Defaults to 'B'.
        sink_mode (str, optional): Sink mode. Defaults to 'all'. If 'mask' is selected,
            a mask must be provided when calling the transform.
        backend (str, optional): Backend used to solve the linear system,
            'scipy' or 'octave'. Defaults to 'scipy'.
    """

    def __init__(
        self,
        alpha: float = 2.0,
        beta: float = 90.0,
        gamma: float = 0.05,
        mode: Literal["RF", "B"] = "B",
        sink_mode: Literal["all", "mid", "min", "mask"] = "all",
        backend: Literal["scipy", "octave"] = "scipy",
    ):
        # The hyperparameters for confidence map estimation
        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma
        self.mode = mode
        self.sink_mode = sink_mode
        self.backend = backend

        # The precision to use for all computations
        self.eps = np.finfo("float64").eps

        # Store sink indices for external use
        self._sink_indices = np.array([], dtype="float64")

        if self.backend == "octave":
            # Octave instance for computing the confidence map
            self.oc = Oct2Py()

    def sub2ind(self, size: Tuple[int, ...], rows: np.ndarray, cols: np.ndarray) -> np.ndarray:
        """Converts row and column subscripts into linear indices,
        basically the copy of the MATLAB function of the same name.
        https://www.mathworks.com/help/matlab/ref/sub2ind.html

        This function is Pythonic so the indices start at 0.

        Args:
            size (Tuple[int, ...]): Size of the matrix
            rows (np.ndarray): Row indices
            cols (np.ndarray): Column indices

        Returns:
            indices (np.ndarray): 1-D array of linear indices
        """
        # Column-major (MATLAB-style) linearization: index = row + col * n_rows
        indices = rows + cols * size[0]
        return indices

    def get_seed_and_labels(
        self, data: np.ndarray, sink_mode: str = "all", sink_mask: Optional[np.ndarray] = None
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Get the seed and label arrays for the max-flow algorithm.

        The entire first row is always used as the source (label 1); the sink
        (label 2) placement depends on ``sink_mode``.

        Args:
            data: Input array
            sink_mode (str, optional): Sink mode, one of 'all', 'mid', 'min', 'mask'.
                Defaults to 'all'.
            sink_mask (np.ndarray, optional): Sink mask, required when
                ``sink_mode == 'mask'``. Defaults to None.

        Returns:
            Tuple[np.ndarray, np.ndarray]: Seed and label arrays

        Raises:
            ValueError: If ``sink_mode`` is unknown, or 'mask' is requested
                without a ``sink_mask``.
        """

        # Seeds and labels (boundary conditions)
        seeds = np.array([], dtype="float64")
        labels = np.array([], dtype="float64")

        # Indices for all columns
        sc = np.arange(data.shape[1], dtype="float64")

        # SOURCE ELEMENTS - 1st matrix row
        # Indices for 1st row, it will be broadcasted with sc
        sr_up = np.array([0])
        seed = self.sub2ind(data.shape, sr_up, sc).astype("float64")
        seed = np.unique(seed)
        seeds = np.concatenate((seeds, seed))

        # Label 1
        label = np.ones_like(seed)
        labels = np.concatenate((labels, label))

        # Create seeds for sink elements

        if sink_mode == "all":
            # All elements in the last row
            sr_down = np.ones_like(sc) * (data.shape[0] - 1)
            self._sink_indices = np.array([sr_down, sc], dtype="int32")
            seed = self.sub2ind(data.shape, sr_down, sc).astype("float64")

        elif sink_mode == "mid":
            # Middle element in the last row
            sc_down = np.array([data.shape[1] // 2])
            sr_down = np.ones_like(sc_down) * (data.shape[0] - 1)
            self._sink_indices = np.array([sr_down, sc_down], dtype="int32")
            seed = self.sub2ind(data.shape, sr_down, sc_down).astype("float64")

        elif sink_mode == "min":
            # Minimum element in the last row (excluding 10% from the edges)
            ten_percent = int(data.shape[1] * 0.1)
            min_val = np.min(data[-1, ten_percent:-ten_percent])
            min_idxs = np.where(data[-1, ten_percent:-ten_percent] == min_val)[0] + ten_percent
            sc_down = min_idxs
            sr_down = np.ones_like(sc_down) * (data.shape[0] - 1)
            self._sink_indices = np.array([sr_down, sc_down], dtype="int32")
            seed = self.sub2ind(data.shape, sr_down, sc_down).astype("float64")

        elif sink_mode == "mask":
            # All elements in the mask
            if sink_mask is None:
                raise ValueError("sink_mask must be provided when sink_mode is 'mask'.")
            coords = np.where(sink_mask != 0)
            sr_down = coords[0]
            sc_down = coords[1]
            self._sink_indices = np.array([sr_down, sc_down], dtype="int32")
            seed = self.sub2ind(data.shape, sr_down, sc_down).astype("float64")

        else:
            raise ValueError(f"Unknown sink mode: {sink_mode!r}. Expected 'all', 'mid', 'min' or 'mask'.")

        seed = np.unique(seed)
        seeds = np.concatenate((seeds, seed))

        # Label 2
        label = np.ones_like(seed) * 2
        labels = np.concatenate((labels, label))

        return seeds, labels

    def normalize(self, inp: np.ndarray) -> np.ndarray:
        """Normalize an array to [0, 1]"""
        # eps keeps the division well-defined for a constant input (ptp == 0)
        return (inp - np.min(inp)) / (np.ptp(inp) + self.eps)

    def attenuation_weighting(self, A: np.ndarray, alpha: float) -> np.ndarray:
        """Compute attenuation weighting

        Args:
            A (np.ndarray): Image
            alpha: Attenuation coefficient (see publication)

        Returns:
            W (np.ndarray): Weighting expressing depth-dependent attenuation
        """

        # Create depth vector and repeat it for each column
        Dw = np.linspace(0, 1, A.shape[0], dtype="float64")
        Dw = np.tile(Dw.reshape(-1, 1), (1, A.shape[1]))

        W = 1.0 - np.exp(-alpha * Dw)  # Compute exp inline

        return W

    def confidence_laplacian(
        self, P: np.ndarray, A: np.ndarray, beta: float, gamma: float
    ) -> csc_matrix:  # type: ignore
        """Compute 6-Connected Laplacian for confidence estimation problem

        Args:
            P (np.ndarray): The index matrix of the image with boundary padding.
            A (np.ndarray): The padded image.
            beta (float): Random walks parameter that defines the sensitivity of the Gaussian weighting function.
            gamma (float): Horizontal penalty factor that adjusts the weight of horizontal edges in the Laplacian.

        Returns:
            L (csc_matrix): The 6-connected Laplacian matrix used for confidence map estimation.
        """

        m, _ = P.shape

        # Flatten in column-major order so linear indices match `sub2ind`
        P = P.T.flatten()
        A = A.T.flatten()

        p = np.where(P > 0)[0]

        i = P[p] - 1  # Index vector
        j = P[p] - 1  # Index vector
        # Entries vector, initially for diagonal
        s = np.zeros_like(p, dtype="float64")

        # Offsets to the 8 neighbours in the padded, column-major layout
        edge_templates = [
            -1,  # Vertical edges
            1,
            m - 1,  # Diagonal edges
            m + 1,
            -m - 1,
            -m + 1,
            m,  # Horizontal edges
            -m,
        ]

        vertical_end = None

        for iter_idx, k in enumerate(edge_templates):

            Q = P[p + k]

            q = np.where(Q > 0)[0]

            ii = P[p[q]] - 1
            i = np.concatenate((i, ii))
            jj = Q[q] - 1
            j = np.concatenate((j, jj))
            W = np.abs(A[p[ii]] - A[p[jj]])  # Intensity derived weight
            s = np.concatenate((s, W))

            if iter_idx == 1:
                vertical_end = s.shape[0]  # Vertical edges length

        # Normalize weights
        s = self.normalize(s)

        # Add gamma to the diagonal placeholders and vertical edges
        # (labelled "horizontal penalty" in the original code).
        s[:vertical_end] += gamma
        # NOTE(review): the paper weights diagonal edges by gamma * sqrt(2)
        # (they are longer), but the original reference code does not.

        # Normalize differences
        s = self.normalize(s)

        # Gaussian weighting function
        s = -(
            (np.exp(-beta * s, dtype="float64")) + 1.0e-6
        )  # --> This epsilon changes results drastically default: 1.e-6

        # Create Laplacian, diagonal missing
        L = csc_matrix((s, (i, j)))

        # Reset diagonal weights to zero for summing
        # up the weighted edge degree in the next step
        L.setdiag(0)

        # Weighted edge degree (`.A` matrix attribute is deprecated; use asarray)
        D = np.abs(np.asarray(L.sum(axis=0))).ravel()

        # Finalize Laplacian by completing the diagonal
        L.setdiag(D)

        return L

    def _solve_linear_system(self, D, rhs, tol=1.0e-8, mode="scipy"):
        """Solve the sparse linear system ``D x = rhs``.

        Args:
            D: Sparse system matrix.
            rhs: Right-hand side vector/column.
            tol: Kept for interface compatibility; scipy's ``spsolve`` is a
                direct solver and does not use it.
            mode: 'scipy' or 'octave'.

        Raises:
            ValueError: If ``mode`` is not a supported backend.
        """
        if mode == "scipy":
            X = spsolve(D, rhs)
        elif mode == "octave":
            X = self.oc.mldivide(D, rhs)[:, 0]
        else:
            raise ValueError(f"Unsupported solver mode: {mode!r}. Expected 'scipy' or 'octave'.")
        return X

    def confidence_estimation(self, A, seeds, labels, beta, gamma, backend):
        """Compute confidence map

        Args:
            A (np.ndarray): Processed image.
            seeds (np.ndarray): Seeds for the random walks framework. These are indices of the source and sink nodes.
            labels (np.ndarray): Labels for the random walks framework. These represent the classes or groups of the seeds.
            beta: Random walks parameter that defines the sensitivity of the Gaussian weighting function.
            gamma: Horizontal penalty factor that adjusts the weight of horizontal edges in the Laplacian.
            backend: Solver backend, 'scipy' or 'octave'.

        Returns:
            map: Confidence map which shows the probability of each pixel belonging to the source or sink group.
        """

        # Index matrix with boundary padding (column-major numbering, see sub2ind)
        G = np.arange(1, A.shape[0] * A.shape[1] + 1).reshape(A.shape[1], A.shape[0]).T
        pad = 1

        G = np.pad(G, (pad, pad), "constant", constant_values=(0, 0))
        B = np.pad(A, (pad, pad), "constant", constant_values=(0, 0))

        # Laplacian
        D = self.confidence_laplacian(G, B, beta, gamma)

        # Seeds arrive as float64; sparse fancy indexing requires integer indices
        seed_idx = seeds.astype(int)

        # Select marked columns from Laplacian to create L_M and B^T
        B = D[:, seed_idx]

        # Select marked nodes to create B^T
        N = np.sum(G > 0).item()
        i_U = np.setdiff1d(np.arange(N), seed_idx)  # Index of unmarked nodes
        B = B[i_U, :]

        # Remove marked nodes from Laplacian by deleting rows and cols
        keep_indices = np.setdiff1d(np.arange(D.shape[0]), seed_idx)
        D = csc_matrix(D[keep_indices, :][:, keep_indices])

        # Define M matrix (1 for source seeds, 0 for sink seeds)
        M = np.zeros((seed_idx.shape[0], 1), dtype="float64")
        M[:, 0] = labels == 1

        # Right-handside (-B^T*M)
        rhs = -B @ M  # type: ignore

        # Solve linear system
        x = self._solve_linear_system(D, rhs, tol=1.0e-3, mode=backend)

        # Prepare output
        probabilities = np.zeros((N,), dtype="float64")
        # Probabilities for unmarked nodes
        probabilities[i_U] = x
        # Max probability for marked source nodes (sink seeds stay at 0)
        probabilities[seed_idx[labels == 1]] = 1.0

        # Final reshape with same size as input image (no padding)
        probabilities = probabilities.reshape((A.shape[1], A.shape[0])).T

        return probabilities

    def __call__(self, data: np.ndarray, sink_mask: Optional[np.ndarray] = None) -> np.ndarray:
        """Compute the confidence map

        Args:
            data (np.ndarray): RF ultrasound data (one scanline per column)
            sink_mask (np.ndarray, optional): Sink mask, required when the
                transform was constructed with ``sink_mode='mask'``.

        Returns:
            map (np.ndarray): Confidence map
        """

        # Normalize data
        data = data.astype("float64")
        data = self.normalize(data)

        if self.mode == "RF":
            # MATLAB hilbert applies the Hilbert transform to columns
            data = np.abs(hilbert(data, axis=0)).astype("float64")  # type: ignore

        seeds, labels = self.get_seed_and_labels(data, self.sink_mode, sink_mask)

        # Attenuation with Beer-Lambert
        W = self.attenuation_weighting(data, self.alpha)

        # Apply weighting directly to image
        # Same as applying it individually during the formation of the
        # Laplacian
        data = data * W

        # Find confidence values
        map_ = self.confidence_estimation(data, seeds, labels, self.beta, self.gamma, self.backend)

        return map_

monai/transforms/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -129,6 +129,7 @@
129129
ShiftIntensity,
130130
StdShiftIntensity,
131131
ThresholdIntensity,
132+
UltrasoundConfidenceMapTransform,
132133
)
133134
from .intensity.dictionary import (
134135
AdjustContrastd,

0 commit comments

Comments
 (0)