Upload your first scene
When uploading raw data to the Kognic Platform, you need to do so in the form of a scene. A scene is a collection of data from different sources, such as images, point clouds, and other sensor data.
This guide walks you through uploading your first scene, either in 2D (camera only) or in 2D/3D (camera plus LiDAR/RADAR).
Prerequisites
You have successfully followed the Quickstart guide and have the kognic-io library installed. If you have access to multiple workspaces, you also need to specify which workspace to upload data to.
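If you do need to select a workspace, a common pattern is to pass the workspace identifier when constructing the client. The snippet below is a minimal sketch: the workspace_id parameter name and the placeholder value are assumptions, so check the kognic-io documentation for your installed version for the exact way to select a workspace.

from kognic.io.client import KognicIOClient

# Assumption: the client accepts a workspace identifier at construction time.
# Replace the placeholder with a workspace id from your organization.
client = KognicIOClient(workspace_id="<your-workspace-id>")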
Code examples
Uploading a 2D scene
To upload a 2D scene, you need to have the raw images available on your local machine (or create a callback for remote data). It is a two-step process:
- Build the scene object in Python
- Upload the scene object to the Kognic Platform
Below are examples for a few different cases: one image, multiple images, and a sequence of images.
One Image:

from kognic.io.client import KognicIOClient
from kognic.io.model.scene.cameras import Cameras, Frame
from kognic.io.model.scene.resources import Image

# 1. Build scene object
scene = Cameras(
    external_id="my-first-scene",
    frame=Frame(images=[Image(filename="path/to/image.jpg")])
)

# 2. Upload scene
client = KognicIOClient()
scene_uuid = client.cameras.create(scene).scene_uuid
print("Scene uploaded, got uuid:", scene_uuid)
Multiple Images:

from kognic.io.client import KognicIOClient
from kognic.io.model.scene.cameras import Cameras, Frame
from kognic.io.model.scene.resources import Image

# 1. Build scene object
scene = Cameras(
    external_id="my-first-scene",
    frame=Frame(
        images=[
            # Sensor names must be unique
            Image(sensor_name="CAM1", filename="path/to/image1.jpg"),
            Image(sensor_name="CAM2", filename="path/to/image2.jpg"),
        ],
    )
)

# 2. Upload scene
client = KognicIOClient()
scene_uuid = client.cameras.create(scene).scene_uuid
print("Scene uploaded, got uuid:", scene_uuid)
Sequence:

from kognic.io.client import KognicIOClient
from kognic.io.model.scene.cameras_sequence import CamerasSequence, Frame
from kognic.io.model.scene.resources import Image

# 1. Build scene object
scene = CamerasSequence(
    external_id="my-first-scene",
    frames=[
        # Relative timestamps must be unique and strictly increasing
        Frame(
            relative_timestamp=0,
            frame_id="1",
            images=[Image(filename="path/to/image1.jpg")],
        ),
        Frame(
            relative_timestamp=100,
            frame_id="2",
            images=[Image(filename="path/to/image2.jpg")],
        ),
        Frame(
            relative_timestamp=200,
            frame_id="3",
            images=[Image(filename="path/to/image3.jpg")],
        ),
    ]
)

# 2. Upload scene
client = KognicIOClient()
scene_uuid = client.cameras_sequence.create(scene).scene_uuid
print("Scene uploaded, got uuid:", scene_uuid)
Uploading a 2D/3D scene
To upload a 2D/3D scene, you need to have the raw images and point clouds available on your local machine (or create a callback for remote data). In addition, you need calibration data. It is a three-step process:
- Create a calibration
- Build the scene object in Python, referencing the calibration from the previous step
- Upload the scene object to the Kognic Platform
Below are examples for a few different cases: one image, multiple images, and a sequence.
One Image:

from kognic.io.client import KognicIOClient
from kognic.io.model.calibration import SensorCalibration, PinholeCalibration, LidarCalibration
from kognic.io.model.scene.lidars_and_cameras import LidarsAndCameras, Frame
from kognic.io.model.scene.resources import Image, PointCloud

client = KognicIOClient()

# 1. Create calibration (see calibration section for more details)
sensor_calibration = SensorCalibration(
    external_id="my-first-calibration",
    calibration={
        "CAM": PinholeCalibration(...),
        "lidar": LidarCalibration(...)
    }
)
created_calibration = client.calibration.create_calibration(sensor_calibration)

# 2. Build scene object
scene = LidarsAndCameras(
    external_id="my-first-scene",
    calibration_id=created_calibration.id,
    frame=Frame(
        images=[Image(sensor_name="CAM", filename="path/to/image.jpg")],
        point_clouds=[PointCloud(sensor_name="lidar", filename="path/to/pointcloud.pcd")]
    )
)

# 3. Upload scene
scene_uuid = client.lidars_and_cameras.create(scene).scene_uuid
print("Scene uploaded, got uuid:", scene_uuid)
Multiple Images:

from kognic.io.client import KognicIOClient
from kognic.io.model.calibration import SensorCalibration, PinholeCalibration, LidarCalibration
from kognic.io.model.scene.lidars_and_cameras import LidarsAndCameras, Frame
from kognic.io.model.scene.resources import Image, PointCloud

client = KognicIOClient()

# 1. Create calibration (see calibration section for more details)
sensor_calibration = SensorCalibration(
    external_id="my-first-calibration",
    calibration={
        "CAM1": PinholeCalibration(...),
        "CAM2": PinholeCalibration(...),
        "lidar": LidarCalibration(...)
    }
)
created_calibration = client.calibration.create_calibration(sensor_calibration)

# 2. Build scene object
scene = LidarsAndCameras(
    external_id="my-first-scene",
    calibration_id=created_calibration.id,
    frame=Frame(
        images=[
            Image(sensor_name="CAM1", filename="path/to/image1.jpg"),
            Image(sensor_name="CAM2", filename="path/to/image2.jpg"),
        ],
        point_clouds=[PointCloud(sensor_name="lidar", filename="path/to/pointcloud.pcd")]
    )
)

# 3. Upload scene
scene_uuid = client.lidars_and_cameras.create(scene).scene_uuid
print("Scene uploaded, got uuid:", scene_uuid)
Sequence:

from kognic.io.client import KognicIOClient
from kognic.io.model.calibration import SensorCalibration, PinholeCalibration, LidarCalibration
from kognic.io.model.scene.lidars_and_cameras_sequence import LidarsAndCamerasSequence, Frame
from kognic.io.model.scene.resources import Image, PointCloud

client = KognicIOClient()

# 1. Create calibration (see calibration section for more details)
sensor_calibration = SensorCalibration(
    external_id="my-first-calibration",
    calibration={
        "CAM": PinholeCalibration(...),
        "lidar": LidarCalibration(...)
    }
)
created_calibration = client.calibration.create_calibration(sensor_calibration)

# 2. Build scene object
scene = LidarsAndCamerasSequence(
    external_id="my-first-scene",
    calibration_id=created_calibration.id,
    frames=[
        # Relative timestamps must be unique and strictly increasing
        Frame(
            relative_timestamp=0,
            frame_id="1",
            images=[Image(sensor_name="CAM", filename="path/to/image1.jpg")],
            point_clouds=[PointCloud(sensor_name="lidar", filename="path/to/pointcloud1.pcd")]
        ),
        Frame(
            relative_timestamp=100,
            frame_id="2",
            images=[Image(sensor_name="CAM", filename="path/to/image2.jpg")],
            point_clouds=[PointCloud(sensor_name="lidar", filename="path/to/pointcloud2.pcd")]
        ),
        Frame(
            relative_timestamp=200,
            frame_id="3",
            images=[Image(sensor_name="CAM", filename="path/to/image3.jpg")],
            point_clouds=[PointCloud(sensor_name="lidar", filename="path/to/pointcloud3.pcd")]
        ),
    ]
)

# 3. Upload scene
scene_uuid = client.lidars_and_cameras_sequence.create(scene).scene_uuid
print("Scene uploaded, got uuid:", scene_uuid)
Multiple point clouds are also supported, but not shown in the examples above since that requires a bit more data. See the Motion Compensation section for more details.
Uploading ZOD data
We provide example code and a tutorial for uploading scenes using Zenseact Open Dataset (ZOD) data, including 2D, 3D, and aggregated 3D scenes. Check out the guide document and example code here!
If you have the ZOD data downloaded and have Kognic API credentials, the examples will run out of the box and create functional scenes.