Skip to content

Commit 8e06010

Browse files
committed
v0.2.0
1 parent d2368b7 commit 8e06010

38 files changed

Lines changed: 1306 additions & 317 deletions

README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -67,11 +67,11 @@ payload: # List of input elements used for data annotations (optional).
6767
Sensors are bundled in groups. Within a group **sensor names must be unique**.
6868
An example config file is provided as [`group.yaml`](examples/config/group.yaml). There, you also find example configs of all supported sensors.
6969

70-
The payload can be used for data annotations. You can find an overview of supported widgets at [`payload.md`](examples/payload.md).
70+
The payload can be used for data annotations. You can find an overview of supported widgets at [`payload.md`](docs/payload.md).
7171

7272
### 2. Using Code
7373

74-
For programmatic control, please refer to the [`demo.py`](examples/demo.py) example.
74+
For programmatic control, please refer to the [`demo.py`](examples/record_data.py) example.
7575
- Run with the default config ([`digit.yaml`](examples/config/digit.yaml))
7676
```bash
7777
python examples/simple/demo.py
File renamed without changes.

examples/access_data.py

Lines changed: 21 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -1,41 +1,21 @@
1-
"""
2-
This script demonstrates how to access and read saved sensor data from recordings.
3-
4-
## Data Structure:
5-
- During recording, data is stored in smaller chunks as `.pkl` files.
6-
- Each `.pkl` file contains a dictionary mapping `sensor_name` to its corresponding sensor data.
7-
- Sensor data is a dictionary where each `data_stream` (e.g., 'camera' for Digit sensor) maps to a list of data points.
8-
- Each data point is a dictionary with:
9-
- `delta`: Time elapsed since recording started.
10-
- `data`: The actual recorded data point.
11-
"""
12-
13-
import os
14-
import pickle
15-
from typing import Generator, Dict, Any
16-
17-
18-
def read_dataset(directory: str) -> Generator[Dict[str, Any], None, None]:
19-
"""
20-
Reads and yields sensor data from all `.pkl` files in the specified directory.
21-
22-
Args:
23-
directory (str): Path to the dataset folder.
24-
25-
Yields:
26-
dict: Parsed sensor data from each `.pkl` file.
27-
"""
28-
if not os.path.exists(directory):
29-
raise FileNotFoundError(f"Directory not found: {directory}")
30-
31-
for filename in sorted(os.listdir(directory)):
32-
if filename.endswith('.pkl'):
33-
file_path = os.path.join(directory, filename)
34-
with open(file_path, 'rb') as file:
35-
yield pickle.load(file)
36-
37-
38-
if __name__ == "__main__":
39-
dataset_path = "../datasets/my_dataset"
40-
for data_chunk in read_dataset(dataset_path):
41-
print(data_chunk)
"""Example: load a recorded touch dataset and inspect one of its data streams."""
from opentouch_interface.decoder import Decoder

# 1. Load the data set
dataset = Decoder('<full-path-to-touch-file>')

# 2. Inspect the data set
print(f'The following sensors have been captured: {dataset.sensor_names}')
print('The sensors have the following streams:')
for sensor in dataset.sensor_names:
    print(f'\t- {sensor}: {dataset.stream_names_of(sensor)}')

# 3. Isolate the camera data of the first captured sensor
sensor_name = dataset.sensor_names[0]
data_stream = 'camera'
camera_data = dataset.stream_data_of(sensor_name, data_stream)

# 4. Iterate the stream: each event is a dict holding the time offset since the
#    start of the recording ('delta') and the raw recorded data point ('data').
for event in camera_data:
    delta = event['delta']
    data = event['data']
    print(delta, type(data))

examples/datasets.py

Lines changed: 0 additions & 9 deletions
This file was deleted.
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,13 @@ def run(cfg: DictConfig):
2020
config = OmegaConf.to_container(cfg, resolve=True)
2121
sensor_group = SensorGroup(config=config)
2222

23+
# Add metadata (e.g., date of recording) to payload
24+
sensor_group.payload.add({
25+
'type': 'text_input',
26+
'label': 'time',
27+
'default': str(time.time())
28+
})
29+
2330
# Retrieve and configure a specific sensor by name
2431
digit = sensor_group.get_sensor('First Gripper')
2532
digit.set('rgb', [0, 0, 15])

opentouch_interface/dashboard/models/__init__.py renamed to opentouch/__init__.py

File renamed without changes.

opentouch/core/__init__.py

Whitespace-only changes.

opentouch/core/base_cnn.py

Lines changed: 84 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,84 @@
1+
from abc import abstractmethod
2+
from typing import Dict, Any
3+
4+
from torch import Tensor
5+
6+
from opentouch.core.base_nn import BaseNeuralNetwork
7+
8+
9+
class BaseCNN(BaseNeuralNetwork):
    """
    Base class for building Convolutional Neural Networks (CNNs) that supports both
    PyTorch and ONNX inference. Handles model construction, forward pass, prediction,
    and saving models in ONNX format with metadata.

    Attributes:
        input_channels (int): Number of input channels (e.g., 3 for RGB images).
        num_classes (int): Number of output classes, derived from `label_mapping`.
        label_mapping (dict): Mapping from class indices to labels.
    """

    def __init__(self, label_mapping: dict, input_channels: int = 3) -> None:
        """Initialize the BaseCNN model.

        Args:
            label_mapping (dict): Mapping from class indices to labels; its
                length determines `num_classes`.
            input_channels (int): Number of input channels (default 3, RGB).
        """
        super().__init__()
        self.input_channels: int = input_channels
        self.label_mapping: dict = label_mapping
        self.num_classes: int = len(label_mapping)
        self.build()  # Call the subclass's build method to define the architecture
        # NOTE(review): self.device is presumably provided by BaseNeuralNetwork — confirm.
        self.to(self.device)  # Move model to device (CPU or GPU)

    @property
    def metadata(self) -> Dict[str, Any]:
        """
        Extends the base metadata with classification-specific attributes:
        input channels, number of classes, label mapping, and output type.
        """
        base_metadata = super().metadata
        base_metadata.update({
            'input_channels': self.input_channels,
            'num_classes': self.num_classes,
            'label_mapping': self.label_mapping,
            'output_type': 'classification'
        })
        return base_metadata

    @abstractmethod
    def build(self) -> None:
        """To be implemented in derived classes to define the model architecture."""
        pass

    def preprocess(self, x: Tensor) -> Tensor:
        """
        Preprocesses the input tensor before it's fed into the model.

        Deliberately NOT abstract: the identity default below is a usable
        implementation, and the contract is that subclasses override it only
        for custom preprocessing. (Previously this carried @abstractmethod,
        contradicting the docstring and forcing every subclass to re-implement
        the no-op.)

        Args:
            x (Tensor): Input tensor.

        Returns:
            Tensor: Preprocessed input tensor (unchanged by default).
        """
        return x

    def forward(self, x: Tensor) -> Tensor:
        """
        Forward pass of the model. Ensures the input tensor is moved to the
        model's device and preprocessed before being passed through the defined
        model. Raises an error if the model isn't built.

        Args:
            x (Tensor): Input tensor.

        Returns:
            Tensor: The output of the model after the forward pass.

        Raises:
            NotImplementedError: If `build()` did not define `self.model`.
        """
        if not self.model:
            raise NotImplementedError("The model is not defined. Implement build() in your subclass.")

        # Move input to the correct device
        x = x.to(self.device)

        # Preprocess input
        x = self.preprocess(x)

        return self.model(x)  # Forward pass through the model

opentouch/core/base_filter.py

Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,57 @@
1+
from typing import Dict, Any
2+
3+
import torch
4+
from abc import abstractmethod
5+
6+
from opentouch.core.base_model import BaseModel
7+
8+
9+
class BaseFilter(BaseModel):
    """
    A base abstract class for all filters, providing utility methods for metadata handling,
    filter saving in ONNX format, and enforcing standard PyTorch module behavior for filtering.
    """

    def __init__(self):
        super().__init__()

    @property
    def metadata(self) -> Dict[str, Any]:
        """
        Extends the base model metadata with the filter-specific output type
        ('image'): a filter transforms images rather than producing class scores.
        """
        base_metadata = super().metadata
        base_metadata.update({
            'output_type': 'image'
        })
        return base_metadata

    @property
    @abstractmethod
    def description(self) -> str:
        """
        Abstract property for a human-readable description of the filter's purpose.
        """
        pass

    @abstractmethod
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Abstract method that must be implemented to define the forward pass for the filter.

        Args:
            x (torch.Tensor): Input tensor to be filtered.

        Returns:
            torch.Tensor: Output tensor after passing through the filter.
        """
        pass

    @abstractmethod
    def onnx_export(self) -> Dict[str, Any]:
        """
        Constructs the parameters needed for torch.onnx.export().

        Returns:
            dict: A dictionary containing the parameters needed for ONNX export
                  (e.g. the example input; see BaseModel.save for the keys read).
        """
        pass

opentouch/core/base_model.py

Lines changed: 98 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,98 @@
1+
import json
2+
import zipfile
3+
from abc import ABC, abstractmethod
4+
from pathlib import Path
5+
from typing import Dict, Any
6+
7+
import torch
8+
import torch.nn as nn
9+
10+
11+
class BaseModel(nn.Module, ABC):
    """
    A base abstract class for all models, providing utility methods for metadata handling,
    model saving in ONNX format, and enforcing standard PyTorch model behavior.
    """

    def __init__(self):
        super().__init__()

    @property
    @abstractmethod
    def description(self) -> str:
        """Abstract property for a description of the model's purpose."""
        pass

    @property
    def metadata(self) -> Dict[str, Any]:
        """
        Constructs metadata with the basic attributes required for all models.

        Returns:
            dict: Metadata containing model_type and description.
        """
        return {
            'model_type': self.__class__.__name__,
            'description': self.description
        }

    @abstractmethod
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Abstract method that must be implemented to define the forward pass for the model.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            torch.Tensor: Output tensor after passing through the model.
        """
        pass

    @abstractmethod
    def onnx_export(self) -> Dict[str, Any]:
        """
        Constructs the parameters needed for torch.onnx.export().

        Returns:
            dict: Must contain 'example_input'; may contain 'export_params',
                  'opset_version', 'input_names' and 'output_names'.
        """
        pass

    def save(self, path: str) -> None:
        """
        Saves the model as an ONNX file plus a JSON metadata file, bundles both
        into `<path>.zip`, and removes the two temporary files afterwards.

        The temporary files are cleaned up even if the export or zipping fails,
        so a failed save does not litter the target directory.

        Args:
            path (str): Base path (extension is replaced/appended as needed).
        """
        # Gather ONNX export parameters from the subclass.
        onnx_params = self.onnx_export()
        onnx_path = Path(path).with_suffix(".onnx")
        metadata_path = Path(f"{path}_metadata.json")

        try:
            # Perform the ONNX export using the current model (`self`).
            torch.onnx.export(
                self,
                onnx_params['example_input'],
                str(onnx_path),  # Required third argument: destination file path
                export_params=onnx_params.get('export_params', True),
                opset_version=onnx_params.get('opset_version', 17),
                input_names=onnx_params.get('input_names', ['input']),
                output_names=onnx_params.get('output_names', ['output']),
            )

            # Save metadata to a JSON file next to the ONNX file.
            metadata_path.write_text(json.dumps(self.metadata))

            # Zip ONNX model and metadata file together.
            zip_path = Path(path).with_suffix(".zip")
            with zipfile.ZipFile(zip_path, 'w') as model_zip:
                model_zip.write(onnx_path, onnx_path.name)
                model_zip.write(metadata_path, metadata_path.name)
        finally:
            # Clean up temporary files regardless of success; missing_ok guards
            # the case where the export failed before a file was written.
            onnx_path.unlink(missing_ok=True)
            metadata_path.unlink(missing_ok=True)

0 commit comments

Comments
 (0)