HV SDK
The HV SDK is our high-level interface to HSI related functionality with both Python and C/C++ APIs.
Using the SDK is the recommended way to interact with the Hypervision cameras and HSI datacubes.
You can find a number of usage examples (as well as the API reference) here.
The HSI Tools library is still under development: new features and improvements/bug fixes are constantly being added.
The alpha version was released in March 2025, and live data capture support was added in September 2025.
Currently both Python and C bindings are available.
Data capture
Refer to the HV SDK section and the quick start guide for complete examples.
PCA
import hsi
from hsi.preprocessing import make_reference, reflectance_calibration
import numpy as np
from sklearn.decomposition import PCA
# For showing the results
import matplotlib.pyplot as plt
# Input data
img = hsi.open("HSI_data/nuts/mix1_1296x1000x900_imageCube.pam")
dark = hsi.open("HSI_data/nuts/dark_ref_1296x100x900_imageCube.pam")
white = hsi.open("HSI_data/nuts/white_ref_1296x100x900_imageCube.pam")

# Spectral band index used for the single-channel visualizations.
CHANNEL = 450

# Which axis holds the spectral bands depends on the interleave, so the
# slicing differs per layout (BSQ: bands first, BIL: bands middle,
# otherwise BIP: bands last).
if img.header.interleave == hsi.Interleave.BSQ:
    i = img.to_numpy()[CHANNEL, :, :]
elif img.header.interleave == hsi.Interleave.BIL:
    i = img.to_numpy()[:, CHANNEL, :]
else:  # BIP
    i = img.to_numpy()[:, :, CHANNEL]

plt.figure(figsize=(10, 8))
plt.imshow(i, cmap='gray')
plt.title(f"Visualization of Image at band {CHANNEL}")
plt.xlabel("Width")
plt.ylabel("Height")
plt.colorbar(label='Pixel Intensity')
# Reflectance Calibration
# Build references from the dark/white calibration cubes, then convert the
# raw image to reflectance (clip=True presumably clamps out-of-range values
# -- confirm against the SDK reference).
dark_ref = make_reference(dark)
white_ref = make_reference(white)
reflectance = reflectance_calibration(img, white_ref, dark_ref, clip=True)

# Same interleave-dependent band slicing as for the raw image above.
if reflectance.header.interleave == hsi.Interleave.BSQ:
    r = reflectance.to_numpy()[CHANNEL, :, :]
elif reflectance.header.interleave == hsi.Interleave.BIL:
    r = reflectance.to_numpy()[:, CHANNEL, :]
else:  # BIP
    r = reflectance.to_numpy()[:, :, CHANNEL]

plt.figure(figsize=(10, 8))
plt.imshow(r, cmap='gray')
plt.title(f"Visualization of Reflectance at band {CHANNEL}")
plt.xlabel("Width")
plt.ylabel("Height")
plt.colorbar(label='Reflectance')
plt.show()
################### PCA
def gen_select(img, n_samples_per_line=10, rng=None):
    """Randomly sample ``n_samples_per_line`` columns from every line of *img*.

    Assumes a BIL-interleaved image (each plane handed to the selector is one
    line of shape ``(bands, samples)``); it does not work for BSQ/BIP.

    Parameters
    ----------
    img :
        Image object whose ``ufunc`` method applies a function plane-by-plane.
    n_samples_per_line : int, optional
        Number of random columns kept per line (sampled with replacement).
    rng : numpy random generator, optional
        Source of randomness; defaults to the global ``np.random`` module so
        the original behavior is unchanged.
    """
    if rng is None:
        rng = np.random

    def select(plane):
        # choice(k, ...) is equivalent to choice(np.arange(k), ...);
        # no need to materialize the index array.
        cols = rng.choice(plane.shape[1], size=n_samples_per_line)
        return plane[:, cols]

    return img.ufunc(select)  # Any Python function can be passed here
# Convert interleave type to BIL
# (gen_select's per-line sampling assumes the BIL layout)
reflectance = reflectance.to_interleave(hsi.Interleave.BIL)
# Get subsample from image in memory-efficient manner
s_out = gen_select(reflectance).to_numpy()
# Move the band axis last, then flatten so each row is one spectrum,
# as expected by sklearn's PCA.fit.
s_out = s_out.transpose((0, 2, 1))
s_out = s_out.reshape((-1, s_out.shape[2]))
# Fit model
n_components = 6
model = PCA(n_components)
model.fit(s_out)
# components_ has shape (n_components, n_features), i.e. one loading
# vector per principal component.
loadings = model.components_
num_features = loadings.shape[1]
# --- Plotting the Loadings Grid ---
# Create a 3x2 grid of subplots
fig, axes = plt.subplots(3, 2, figsize=(15, 15))
# Flatten so the grid can be indexed with a single counter
axes = axes.flatten()
# One loadings-vs-band line plot per principal component
for i in range(n_components):
    # Select the i-th row from the components_ matrix
    component_loadings = model.components_[i, :]
    # Plot the loadings on the current subplot
    axes[i].plot(range(num_features), component_loadings, color='blue', alpha=0.8)
    axes[i].set_title(f"Loadings for PC{i + 1}")
    axes[i].set_xlabel("Original Feature Index")
    axes[i].set_ylabel("Loading Value")
    axes[i].grid(True, linestyle='--', alpha=0.6)
# Adjust the layout to prevent titles from overlapping
plt.tight_layout()
# Show the final plot
plt.show()
# Create transformation
hs_model = pca_helper(model)
out = hs_model(reflectance)
res = out.to_numpy()
# Alternatively, if you don't want to use the pca_helper
#out = reflectance.dot(model.components_.T, hsi.bands)
#res = out.to_numpy()

# reflectance was converted to BIL above, so res is presumably
# (height, component, width) -- each PC image is a middle-axis slice.
pc_images = [res[:, i, :] for i in range(res.shape[1])]

# Create a 3x2 grid of subplots with a larger figure size for better viewing
fig, axes = plt.subplots(3, 2, figsize=(10, 15))
axes = axes.flatten()
# Render every principal-component image in grayscale
for i, ax in enumerate(axes):
    im = ax.imshow(pc_images[i], cmap='gray')
    ax.set_title(f"Principal Component {i + 1}")
    ax.set_axis_off()  # Hide the axes for a cleaner look

# Add a single color bar for the entire figure
# Use the last plotted image (im) to get the color map information
fig.subplots_adjust(right=0.85)  # Make space for the color bar
cbar_ax = fig.add_axes([0.88, 0.15, 0.04, 0.7])
fig.colorbar(im, cax=cbar_ax, label='Component Value')
# Adjust layout and show the plot
plt.tight_layout(rect=[0, 0, 0.85, 1])
plt.show()

# Plot the first two principal components
plt.figure(figsize=(10, 8))
plt.scatter(pc_images[0], pc_images[1], alpha=0.5, s=1)
plt.title("PCA Scatter Plot of First Two Components")
plt.xlabel("Principal Component 1")
plt.ylabel("Principal Component 2")
plt.grid(True)
plt.show()
Using ROIs from HV-Explorer
The HV Explorer has the option to save and load annotation data, i.e. regions of interest (ROIs).
To save annotation data click the Save annotations icon (floppy disk) on the
right side of the top bar of the Spectra Settings
tab.
Note that this example expects a "Label Property" called "type" to also be
present together with the ROIs in order to group them in classes.
In order to create a "Label Property" click on the
Add property icon on the left side of the top
bar of the Spectra Settings tab.
These saved annotations can then be loaded into a Python script and, for example, be used to mark the PCA regions they belong to and subsequently used for classification, as shown below.
from skimage.draw import polygon

# Version of HV-Explorer that exported the annotation data below
# (the ellipse a/b semantics changed in 0.2.6: earlier versions saved
# diameters instead of radii).
HV_EXPLORER_VERSION = "0.2.6"
# Setup identical to the PCA example above (load, calibrate, fit PCA).
...
# Create transformation
hs_model = pca_helper(model)
out = hs_model(reflectance)
res = out.to_numpy()
# Unpack as (height, n components, width).
height, n, width = res.shape
pc_images = [res[:, i, :] for i in range(res.shape[1])]
# Annotation data from HV-Explorer
# made for nuts image: "mix1_1296x1000x900_imageCube.pam"
# TODO: load from file (JSON data)
annotations_data = {
"annotations":[
{"title":"almond1","uuid":"8bd8c222-f29f-54f5-88b6-5c78a59f0f2c","descriptor":{"annot":"polyline","points":[[447,333],[460,322],[486,324],[505,337],[513,355],[495,361],[475,354],[461,346]],"width":66,"height":39,"x":447,"y":322},"image_path":"mix1_1296x1000x900_imageCube.pam","color":"#ff7f0e","properties":{"type":"almond"}},
{"title":"almond2","uuid":"8bd8c222-f29f-54f5-88b6-5c78a59f0f2c","descriptor":{"annot":"polyline","points":[[395,291],[384,304],[400,317],[433,309],[458,286],[444,277],[415,285]],"width":73,"height":41,"x":384,"y":277},"image_path":"mix1_1296x1000x900_imageCube.pam","color":"#2ca02c","properties":{"type":"almond"}},
{"title":"almond3","uuid":"8bd8c222-f29f-54f5-88b6-5c78a59f0f2c","descriptor":{"annot":"ellipse","cx":521,"cy":301,"a":84.0824759720031,"b":34.99025710387992,"angle":8.030087480156281,"max_dist":84},"image_path":"mix1_1296x1000x900_imageCube.pam","color":"#d62728","properties":{"type":"almond"}},
{"title":"background","uuid":"8bd8c222-f29f-54f5-88b6-5c78a59f0f2c","descriptor":{"annot":"rectangle","x":415,"y":397,"width":138,"height":51},"image_path":"mix1_1296x1000x900_imageCube.pam","color":"#9467bd","properties":{"type":"background"}},
{"title":"walnut1","uuid":"8bd8c222-f29f-54f5-88b6-5c78a59f0f2c","descriptor":{"annot":"polyline","points":[[345,164],[324,143],[320,110],[345,101],[347,109],[366,110],[369,97],[398,98],[411,118],[412,145],[387,157],[374,145],[370,157],[351,147]],"width":92,"height":67,"x":320,"y":97},"image_path":"mix1_1296x1000x900_imageCube.pam","color":"#8c564b","properties":{"type":"walnut"}},{"title":"walnut2","uuid":"8bd8c222-f29f-54f5-88b6-5c78a59f0f2c","descriptor":{"annot":"polyline","points":[[213,181],[194,178],[185,162],[191,144],[210,133],[227,132],[222,120],[226,108],[254,114],[258,127],[268,137],[267,152],[261,159],[264,165],[252,178],[241,172],[236,158],[212,163]],"width":83,"height":73,"x":185,"y":108},"image_path":"mix1_1296x1000x900_imageCube.pam","color":"#e377c2","properties":{"type":"walnut"}},
{"title":"hasselnut1","uuid":"8bd8c222-f29f-54f5-88b6-5c78a59f0f2c","descriptor":{"annot":"polyline","points":[[484,548],[472,545],[464,528],[465,519],[482,511],[499,511],[508,517],[513,531],[500,544]],"width":49,"height":37,"x":464,"y":511},"image_path":"mix1_1296x1000x900_imageCube.pam","color":"#7f7f7f","properties":{"type":"hasselnut"}},
{"title":"hasselnut2","uuid":"8bd8c222-f29f-54f5-88b6-5c78a59f0f2c","descriptor":{"annot":"polyline","points":[[524,611],[537,621],[560,625],[579,614],[578,597],[560,586],[538,588],[525,594]],"width":54,"height":39,"x":524,"y":586},"image_path":"mix1_1296x1000x900_imageCube.pam","color":"#bcbd22","properties":{"type":"hasselnut"}}
],"property_desc":{"type":{"descriptor_type":"labeldescriptor","labels":["almond","hasselnut","walnut","background"],"colors":{"almond":"#8ff0a4","hasselnut":"#f9f06b","walnut":"#f66151","background":"#1a5fb4"}}}
}
# Flatten the cube so each row holds one pixel's n PCA scores:
# (height, n, width) -> (height, width, n) -> (height*width, n).
principal_components = np.reshape(res.swapaxes(1, 2), (-1, n))
# --- Group Annotations and Get PCA Values ---
# Use a dictionary to store lists of PCA values for each group
grouped_pc_values = {}
for annot in annotations_data["annotations"]:
    # The class name is now directly in the 'type' property
    class_name = annot["properties"]["type"]
    desc = annot["descriptor"]
    mask = np.zeros((height, width), dtype=bool)
    # Convert different annotation types to a boolean mask
    if desc["annot"] == "rectangle":
        x, y, w, h = desc["x"], desc["y"], desc["width"], desc["height"]
        mask[y:y+h, x:x+w] = True
    elif desc["annot"] == "ellipse":
        # HV Explorer versions prior to 0.2.6 had a bug in which the A and B
        # ellipse parameters were saved as diameters instead of radii
        #cx, cy, a, b, angle = desc["cx"], desc["cy"], desc["a"]/2, desc["b"]/2, desc["angle"]
        cx, cy, a, b, angle = desc["cx"], desc["cy"], desc["a"], desc["b"], desc["angle"]
        y_grid, x_grid = np.ogrid[:height, :width]
        # Standard rotated-ellipse inequality evaluated on the full pixel grid.
        ellipse_mask = (((x_grid - cx) * np.cos(np.deg2rad(angle)) + (y_grid - cy) * np.sin(np.deg2rad(angle)))**2 / a**2 +
                        ((x_grid - cx) * np.sin(np.deg2rad(angle)) - (y_grid - cy) * np.cos(np.deg2rad(angle)))**2 / b**2) <= 1
        mask[ellipse_mask] = True
    elif desc["annot"] == "polyline":
        points = np.array(desc["points"])
        # skimage.draw.polygon expects (row, col) = (y, x); shape= clips to image.
        rr, cc = polygon(points[:, 1], points[:, 0], shape=(height, width))
        mask[rr, cc] = True
    # Get the 1D PCA values for the annotated pixels
    mask_1d = mask.ravel()
    annotated_pc_values = principal_components[mask_1d]
    # Append the PCA values to the correct list in our dictionary
    grouped_pc_values.setdefault(class_name, []).append(annotated_pc_values)

# Concatenate the per-annotation arrays into one (n_pixels, n_components)
# array per class.  (The original computed this dictionary twice, identically;
# once is enough.)
final_class_data = {
    class_name: np.concatenate(pc_lists)
    for class_name, pc_lists in grouped_pc_values.items()
}
# --- Plotting the data by class ---
plt.figure(figsize=(10, 8))
component_x = 0  # zero-based index of the PC on the x axis
component_y = 1  # zero-based index of the PC on the y axis
# Plot all pixels in the background
plt.scatter(principal_components[:, component_x], principal_components[:, component_y], c='gray', alpha=0.1, s=1)
# Get class names and colors from the property_desc for a robust plot
class_labels = annotations_data['property_desc']['type']['labels']
class_colors = annotations_data['property_desc']['type']['colors']
# Iterate through the classes and plot each group with its own color and label
for class_name in class_labels:
    if class_name in final_class_data:
        pc_values = final_class_data[class_name]
        plt.scatter(pc_values[:, component_x], pc_values[:, component_y],
                    color=class_colors[class_name], s=10, label=class_name)
plt.title("PCA Scatter Plot with Annotated Regions")
# +1 converts the zero-based indices to the 1-based "PC1"/"PC2" naming
# used by the other plots in this example.
plt.xlabel(f"Principal Component {component_x + 1}")
plt.ylabel(f"Principal Component {component_y + 1}")
plt.legend()
plt.grid(True)
plt.show()
# --- Part 2: Classify the Entire Image based on Annotations ---
# Calculate the centroid (mean) for each class
centroids = {
    class_name: np.mean(pc_values, axis=0)
    for class_name, pc_values in final_class_data.items()
}
# Create a mapping from class title to a number for plotting
class_names = sorted(centroids.keys())
class_map = {name: i + 1 for i, name in enumerate(class_names)}
centroid_array = np.array([centroids[name] for name in class_names])
# Vectorized classification: nearest centroid in PC space for every pixel.
distances = np.linalg.norm(principal_components[:, np.newaxis] - centroid_array, axis=2)
closest_centroid_indices = np.argmin(distances, axis=1)
classification_image = closest_centroid_indices + 1  # value 0 reserved for "unlabeled"
# Reshape and visualize
classification_image_2d = classification_image.reshape(height, width)
# Use the colors from property_desc to correctly map colors
color_dict = annotations_data['property_desc']['type']['colors']
class_colors = [color_dict[name] for name in class_names]
final_colors = ['#000000'] + class_colors
final_map = plt.cm.colors.ListedColormap(final_colors)
plt.figure(figsize=(10, 8))
# vmin/vmax pin class value v to final_colors[v].  Without them imshow
# autoscales to the data range (1..K), which maps class 1 onto the black
# color reserved for value 0.
plt.imshow(classification_image_2d, cmap=final_map, interpolation='none',
           vmin=0, vmax=len(class_names))
plt.title("Image Classified by PCA Centroids")
plt.xlabel("Width")
plt.ylabel("Height")
cbar = plt.colorbar(ticks=range(1, len(class_map) + 1))
cbar.ax.set_yticklabels(class_names)
plt.show()