Examples
This section provides complete, practical examples of using the Zonotope package for different applications.
Basic Uncertainty Propagation
This example demonstrates how to use zonotopes for basic uncertainty propagation through a linear system.
import torch as t
from zonotope import Zonotope
# Define a 2D system with uncertainty
A = t.tensor([[0.9, 0.1],
              [0.1, 0.9]])
# Initial state with uncertainty
x0 = Zonotope.from_values(
    center=[1.0, 0.0],                    # Initial mean state
    infinity_terms=[[0.1, 0], [0, 0.1]],  # Initial uncertainty
)
# Propagate the system for 10 steps
states = [x0]
x = x0
for i in range(10):
    # Linear state update: x_next = A @ x
    x = x.mul(A, pattern="d, e d -> e")
    states.append(x.clone())
# Print the bounds of the final state
final_lower, final_upper = states[-1].concretize()
print(f"Final state bounds: [{final_lower}, {final_upper}]")
# We can sample trajectories from these zonotopes
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 6))
# Sample 10 trajectories
n_trajectories = 10
trajectories = []
for i in range(n_trajectories):
    trajectory = []
    for state in states:
        # Sample a single point from each state zonotope
        point = state.sample_point(n_samples=1)[0]
        trajectory.append(point.detach())
    trajectories.append(t.stack(trajectory))
# Plot the trajectories
for traj in trajectories:
    plt.plot(traj[:, 0], traj[:, 1], 'b-', alpha=0.3)
# Plot the initial and final state bounds
for i, state in enumerate([states[0], states[-1]]):
    lower, upper = state.concretize()
    lower = lower.detach().numpy()
    upper = upper.detach().numpy()
    color = 'g' if i == 0 else 'r'
    label = 'Initial' if i == 0 else 'Final'
    # Plot a rectangle representing the bounds
    plt.fill([lower[0], upper[0], upper[0], lower[0], lower[0]],
             [lower[1], lower[1], upper[1], upper[1], lower[1]],
             color=color, alpha=0.2, label=label)
plt.xlabel('x1')
plt.ylabel('x2')
plt.title('Uncertainty Propagation with Zonotopes')
plt.legend()
plt.grid(True)
plt.axis('equal')
plt.show()
Batch Processing of Multiple Zonotopes
This example shows how to efficiently work with batches of zonotopes:
import torch as t
from zonotope import Zonotope
# Create a batch of 100 zonotopes representing different initial conditions
batch_size = 100
dim = 5 # 5-dimensional zonotopes
# Batch of centers
centers = t.randn(batch_size, dim)
# Batch of error terms (same structure for all zonotopes in the batch)
infinity_terms = t.zeros(batch_size, dim, 2)
infinity_terms[:, :, 0] = 0.1 # First error term
infinity_terms[:, :, 1] = 0.2 # Second error term
# Create the batch zonotope
batch_z = Zonotope(
    center=centers,
    infinity_terms=infinity_terms,
    special_norm=2
)
# We can process the whole batch at once
# For example, scaling all zonotopes
scaled_batch = batch_z * 2.0
# Or we can extract individual zonotopes
first_z = batch_z[0]
subset_z = batch_z[:10]
# Compute bounds for all zonotopes at once
all_lower, all_upper = batch_z.concretize()
print(f"Shape of bounds: {all_lower.shape}") # [batch_size, dim]
# Calculate the average volume of the zonotopes' bounding boxes
volumes = t.prod(all_upper - all_lower, dim=1)
avg_volume = t.mean(volumes)
print(f"Average volume: {avg_volume:.4f}")
Neural Network Verification
This example shows a simplified version of how zonotopes can be used for neural network verification:
import torch as t
import torch.nn as nn
from zonotope import Zonotope
# Define a simple feedforward neural network
class SimpleNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(2, 10)
        self.fc2 = nn.Linear(10, 2)
        self.relu = nn.ReLU()
    def forward(self, x):
        x = self.relu(self.fc1(x))
        x = self.fc2(x)
        return x
# Create and initialize the network
net = SimpleNN()
# Define an input region as a zonotope
input_region = Zonotope.from_bounds(
    lower=t.tensor([-1.0, -1.0]),
    upper=t.tensor([1.0, 1.0])
)
# To propagate through the linear layer, we use the weight matrix directly
def propagate_linear(z, layer):
    weights = layer.weight.data
    bias = layer.bias.data
    # Apply the linear transformation
    result = z.mul(weights, pattern="d, o d -> o") + bias
    return result
# A simple (but imprecise) ReLU approximation using concretization
def propagate_relu(z):
    lower, upper = z.concretize()
    # Case 1: If upper <= 0, the output is zero
    # Case 2: If lower >= 0, ReLU is the identity
    # Case 3: Otherwise, we over-approximate the crossing neuron
    new_center = t.zeros_like(z.W_C)
    # Keep room for the original error terms plus one fresh term per dimension,
    # so that crossing neurons do not share a noise symbol with each other
    new_infinity_terms = t.zeros(z.N, z.Ei + z.N)
    new_special_terms = None if z.Es == 0 else t.zeros_like(z.W_Es)
    # Process each dimension separately
    for i in range(z.N):
        if upper[i] <= 0:
            # Output is zero: center and error terms stay zero
            pass
        elif lower[i] >= 0:
            # ReLU is the identity: copy the original terms
            new_center[i] = z.W_C[i]
            new_infinity_terms[i, :z.Ei] = z.W_Ei[i]
            if z.Es > 0:
                new_special_terms[i] = z.W_Es[i]
        else:
            # Over-approximate the output range [0, upper] with
            # center upper/2 and one fresh error term of radius upper/2
            new_center[i] = upper[i] / 2
            new_infinity_terms[i, z.Ei + i] = upper[i] / 2
    return Zonotope(
        center=new_center,
        infinity_terms=new_infinity_terms,
        special_terms=new_special_terms,
        special_norm=z.p
    )
# Propagate through the network
z = input_region
# First layer
z = propagate_linear(z, net.fc1)
z = propagate_relu(z)
# Second layer
z = propagate_linear(z, net.fc2)
# Get the output bounds
output_lower, output_upper = z.concretize()
print(f"Output bounds: [{output_lower}, {output_upper}]")
# This is a simplified example. A real implementation would use more
# precise approximations of ReLU and handle other activation functions.
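For reference, a tighter relaxation commonly used in zonotope-based verification keeps an affine relationship through each crossing neuron (slope lam = u / (u - l)) and adds one fresh error term per neuron. The sketch below is an illustration only, written against the same attributes used above (W_C, W_Ei, Ei, N, concretize); it assumes the input zonotope has only infinity-norm error terms and is not part of the package API.
def propagate_relu_tight(z):
    # Sketch of a DeepZ-style ReLU relaxation (illustration, not package API).
    # Assumes z carries only infinity-norm error terms.
    lower, upper = z.concretize()
    new_center = z.W_C.clone()
    # Original error terms plus one fresh term per dimension
    new_infinity_terms = t.zeros(z.N, z.Ei + z.N)
    new_infinity_terms[:, :z.Ei] = z.W_Ei
    for i in range(z.N):
        if upper[i] <= 0:
            # Dead neuron: output is exactly zero
            new_center[i] = 0
            new_infinity_terms[i] = 0
        elif lower[i] >= 0:
            # Active neuron: ReLU is the identity, keep the terms as they are
            continue
        else:
            # Crossing neuron: relu(x) lies in lam * x + mu +/- mu,
            # with lam = u / (u - l) and mu = -lam * l / 2
            lam = upper[i] / (upper[i] - lower[i])
            mu = -lam * lower[i] / 2
            new_center[i] = lam * new_center[i] + mu
            new_infinity_terms[i, :z.Ei] = lam * z.W_Ei[i]
            new_infinity_terms[i, z.Ei + i] = mu
    return Zonotope(center=new_center, infinity_terms=new_infinity_terms)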
Working with Sensors and Measurement Uncertainty
This example demonstrates how to use zonotopes to model and propagate sensor measurement uncertainty:
import torch as t
import matplotlib.pyplot as plt
from zonotope import Zonotope
# Consider a robot position with measurement uncertainty
# Let's say we have measurements from two sensors, each with its own uncertainty
# Sensor 1 measurement (position in 2D)
sensor1_mean = t.tensor([5.0, 3.0])
sensor1_uncertainty = t.tensor([[0.2, 0], [0, 0.2]]) # Uncorrelated uncertainty
# Sensor 2 measurement
sensor2_mean = t.tensor([5.2, 2.8])
sensor2_uncertainty = t.tensor([[0.1, 0.05], [0.05, 0.15]]) # Correlated uncertainty
# Convert to zonotopes
z1 = Zonotope(
    center=sensor1_mean,
    infinity_terms=sensor1_uncertainty
)
# For the correlated uncertainty, we need to decompose the covariance
# In a real application, you might use SVD or other decomposition
# For simplicity, we'll just use the covariance directly as error terms
z2 = Zonotope(
    center=sensor2_mean,
    infinity_terms=sensor2_uncertainty
)
# Sensor fusion - a simple weighted average
# In a real application, you might use a more sophisticated approach
fusion_weight = 0.6 # Weight for sensor 1
fused_z = z1 * fusion_weight + z2 * (1 - fusion_weight)
# Let's visualize the measurements and the fused estimate
plt.figure(figsize=(10, 8))
# Function to visualize a zonotope via sampled points and its interval bounds
def plot_zonotope_approx(z, color, label):
    # Sample points from the zonotope
    n_samples = 1000
    samples = z.sample_point(n_samples=n_samples).detach().numpy()
    # Plot the samples
    plt.scatter(samples[:, 0], samples[:, 1], c=color, alpha=0.1, s=5)
    # Plot the center
    plt.scatter([z.W_C[0]], [z.W_C[1]], c=color, marker='x', s=100, label=label)
    # Plot the interval bounds
    lower, upper = z.concretize()
    lower = lower.detach().numpy()
    upper = upper.detach().numpy()
    # Plot a rectangle representing the bounds
    plt.fill([lower[0], upper[0], upper[0], lower[0], lower[0]],
             [lower[1], lower[1], upper[1], upper[1], lower[1]],
             color=color, alpha=0.2)
# Plot the measurements and fused estimate
plot_zonotope_approx(z1, 'blue', 'Sensor 1')
plot_zonotope_approx(z2, 'green', 'Sensor 2')
plot_zonotope_approx(fused_z, 'red', 'Fused Estimate')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Sensor Fusion with Zonotopes')
plt.legend()
plt.grid(True)
plt.axis('equal')
plt.show()
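As the comments above note, a correlated covariance matrix would normally be decomposed into generator directions rather than used directly as error terms. Below is a minimal sketch of one way to do this with a Cholesky factor, continuing the example above; zonotope_from_covariance is a hypothetical helper (assuming a symmetric positive-definite covariance), not part of the package API.
# Hypothetical helper: build a zonotope whose error-term directions come from
# the Cholesky factor of a covariance matrix (cov = L @ L.T).
def zonotope_from_covariance(mean, cov, scale=1.0):
    L = t.linalg.cholesky(cov)
    # Column j of L becomes infinity-norm error term j, optionally scaled
    # to widen or narrow the enclosure
    return Zonotope(center=mean, infinity_terms=scale * L)
z2_decomposed = zonotope_from_covariance(sensor2_mean, sensor2_uncertainty)
print(f"Decomposed sensor 2 bounds: {z2_decomposed.concretize()}")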
Custom Zonotope Operations
This example shows how to implement custom operations on zonotopes:
import torch as t
from zonotope import Zonotope
# Define a custom operation - quadratic transform
def quadratic_transform(z):
    """
    Apply an elementwise quadratic transform f(x) = x^2 to a zonotope.
    This is a simplified, interval-based approximation.
    """
    # Get the bounds of the zonotope
    lower, upper = z.concretize()
    # For a quadratic function, we need to consider two cases:
    # Case 1: lower and upper are both non-negative or both non-positive
    # Case 2: lower < 0 < upper (the interval contains zero)
    new_center = t.zeros_like(z.W_C)
    # One fresh error term per dimension, so the result is an axis-aligned box
    # over the per-dimension output intervals
    new_infinity_terms = t.zeros(z.N, z.N)
    for i in range(z.N):
        if lower[i] >= 0 or upper[i] <= 0:
            # Case 1: Interval doesn't contain zero
            # The new bounds are [min(lower^2, upper^2), max(lower^2, upper^2)]
            l_squared = lower[i] ** 2
            u_squared = upper[i] ** 2
            new_min = t.min(l_squared, u_squared)
            new_max = t.max(l_squared, u_squared)
            # Center at the midpoint of the new bounds
            new_center[i] = (new_min + new_max) / 2
            # Radius is half the range
            new_infinity_terms[i, i] = (new_max - new_min) / 2
        else:
            # Case 2: Interval contains zero, so the new bounds are
            # [0, max(lower^2, upper^2)]
            new_max = t.max(lower[i] ** 2, upper[i] ** 2)
            # Center at the midpoint of the new bounds, radius half the range
            new_center[i] = new_max / 2
            new_infinity_terms[i, i] = new_max / 2
    # Return a new zonotope with the transformed bounds
    return Zonotope(
        center=new_center,
        infinity_terms=new_infinity_terms,
        special_norm=z.p
    )
# Let's test our custom operation
# Create a test zonotope: the first dimension crosses zero, the second does not
z = Zonotope.from_values(
    center=[0.0, 2.0],
    infinity_terms=[[1.0], [0.5]]  # Ranges: [-1, 1] for the first dim, [1.5, 2.5] for the second
)
print("Original zonotope:")
print(f"Center: {z.W_C}")
print(f"Bounds: {z.concretize()}")
# Apply our custom quadratic transform
z_squared = quadratic_transform(z)
print("\nAfter quadratic transform:")
print(f"Center: {z_squared.W_C}")
print(f"Bounds: {z_squared.concretize()}")
# We can verify with sampling
n_samples = 1000
original_samples = z.sample_point(n_samples)
transformed_samples = original_samples ** 2
# Calculate min and max of transformed samples
transformed_min = t.min(transformed_samples, dim=0)[0]
transformed_max = t.max(transformed_samples, dim=0)[0]
print("\nVerification with sampling:")
print(f"Min of transformed samples: {transformed_min}")
print(f"Max of transformed samples: {transformed_max}")
print(f"Bounds from zonotope: {z_squared.concretize()}")
Combining Multiple Error Types
This example demonstrates the use of both infinity-norm and p-norm error terms:
import torch as t
import matplotlib.pyplot as plt
from zonotope import Zonotope
# Create zonotopes with different error term configurations
# 1. Only infinity-norm errors (box)
box_z = Zonotope.from_values(
    center=[0.0, 0.0],
    infinity_terms=[[1.0, 0.0], [0.0, 1.0]]
)
# 2. Only 2-norm errors (ball)
ball_z = Zonotope.from_values(
    center=[0.0, 0.0],
    special_terms=[[1.0, 0.0], [0.0, 1.0]],
    special_norm=2
)
# 3. Only 1-norm errors (diamond)
diamond_z = Zonotope.from_values(
    center=[0.0, 0.0],
    special_terms=[[1.0, 0.0], [0.0, 1.0]],
    special_norm=1
)
# 4. Combination of infinity-norm and 2-norm errors
combo_z = Zonotope.from_values(
    center=[0.0, 0.0],
    infinity_terms=[[0.5, 0.0], [0.0, 0.5]],
    special_terms=[[0.5, 0.0], [0.0, 0.5]],
    special_norm=2
)
# Function to plot samples from a zonotope
def plot_zonotope_samples(ax, z, color, label, n_samples=1000):
    samples = z.sample_point(n_samples=n_samples).detach().numpy()
    ax.scatter(samples[:, 0], samples[:, 1], c=color, alpha=0.1, s=5)
    ax.scatter([0], [0], c=color, marker='x', s=100, label=label)
    # Plot bounds
    lower, upper = z.concretize()
    lower = lower.detach().numpy()
    upper = upper.detach().numpy()
    # Plot a rectangle representing the bounds
    ax.fill([lower[0], upper[0], upper[0], lower[0], lower[0]],
            [lower[1], lower[1], upper[1], upper[1], lower[1]],
            color=color, alpha=0.1)
# Plot all zonotopes
fig, axes = plt.subplots(2, 2, figsize=(12, 10))
axes = axes.flatten()
plot_zonotope_samples(axes[0], box_z, 'blue', 'Box (∞-norm)')
plot_zonotope_samples(axes[1], ball_z, 'green', 'Ball (2-norm)')
plot_zonotope_samples(axes[2], diamond_z, 'red', 'Diamond (1-norm)')
plot_zonotope_samples(axes[3], combo_z, 'purple', 'Combined')
for i, title in enumerate(['Box (∞-norm)', 'Ball (2-norm)', 'Diamond (1-norm)', 'Combined']):
    axes[i].set_title(title)
    axes[i].set_xlim([-1.5, 1.5])
    axes[i].set_ylim([-1.5, 1.5])
    axes[i].grid(True)
    axes[i].set_aspect('equal')
    axes[i].legend()
plt.tight_layout()
plt.savefig('zonotope_types.png')
plt.show()
Working with Large Batch Dimensions
This example demonstrates handling zonotopes with multiple batch dimensions:
import torch as t
from zonotope import Zonotope
# Create a zonotope with multiple batch dimensions
# For example, batch_dim1 x batch_dim2 x feature_dim
batch_dim1 = 10
batch_dim2 = 5
feature_dim = 3
# Create center tensor with shape [batch_dim1, batch_dim2, feature_dim]
centers = t.randn(batch_dim1, batch_dim2, feature_dim)
# Create error terms
infinity_terms = t.abs(t.randn(batch_dim1, batch_dim2, feature_dim, 2)) * 0.1
special_terms = t.randn(batch_dim1, batch_dim2, feature_dim, 3) * 0.05
# Create the multi-batch zonotope
multi_batch_z = Zonotope(
    center=centers,
    infinity_terms=infinity_terms,
    special_terms=special_terms,
    special_norm=2
)
print(f"Zonotope shape: {multi_batch_z.shape}")
print(f"Total elements: {multi_batch_z.N}")
print(f"Error terms: Ei={multi_batch_z.Ei}, Es={multi_batch_z.Es}")
# We can slice this zonotope along batch dimensions
first_batch = multi_batch_z[0]
print(f"First batch shape: {first_batch.shape}")
sub_batch = multi_batch_z[3:7, 1:3]
print(f"Sub-batch shape: {sub_batch.shape}")
# Reshape using rearrange
flattened_batch = multi_batch_z.rearrange("b1 b2 d -> (b1 b2) d")
print(f"Flattened batch shape: {flattened_batch.shape}")
# Split the flattened batch back into two batch dimensions using rearrange
split_batch = flattened_batch.rearrange("(b1 b2) d -> b1 b2 d", b1=2, b2=25)
print(f"Split batch shape: {split_batch.shape}")
# Compute bounds for the entire batch at once
lower, upper = multi_batch_z.concretize()
print(f"Bounds shape: {lower.shape}")
These examples illustrate how the Zonotope package can be used to represent, propagate, and visualize uncertainty across a range of applications.