Canvatorium Visio Labs 5022-5025
Today I put together a handful of labs using SwiftUI gestures to manipulate RealityKit entities.
DragGesture, RotateGesture3D, MagnifyGesture, and a combined rotation and scale gesture.
Lab 5022
Constrain DragGesture to specific axes and bounds.
Write to only the position axes that you want to translate. Clamp the position value using min and max.
/// Lab 5022: Constrain a `DragGesture` to specific axes and bounds.
///
/// Writes only to the X and Z position components and clamps each to
/// ±`limit`, so the entity slides on a bounded plane and never moves in Y.
struct Lab5022: View {
    var body: some View {
        RealityView { content in
            if let root = try? await Entity(named: "GestureLab", in: realityKitContentBundle) {
                root.position = [0, -0.45, 0]
                if let subject = root.findEntity(named: "Cube") {
                    subject.components.set(HoverEffectComponent())
                }
                content.add(root)
            }
        }
        .gesture(dragGesture)
    }

    /// Drag that translates the targeted entity on X/Z only, clamped to a square region.
    var dragGesture: some Gesture {
        DragGesture()
            .targetedToAnyEntity()
            .onChanged { value in
                // Convert the gesture location into the entity's parent space.
                // Guard instead of force-unwrapping: an entity without a parent
                // simply ignores the drag rather than crashing.
                guard let parent = value.entity.parent else { return }
                let newPosition = value.convert(value.location3D, from: .global, to: parent)
                // Clamp the values in both X and Z, ignore Y
                let limit: Float = 0.175
                value.entity.position.x = min(max(newPosition.x, -limit), limit)
                value.entity.position.z = min(max(newPosition.z, -limit), limit)
            }
    }
}
Lab 5023
Using RotateGesture3D to rotate 3D content.
Rotate an entity around the Y axis based on initial rotation.
/// Lab 5023: Use `RotateGesture3D` to rotate 3D content.
///
/// Rotates an entity around the Y axis relative to the transform it had when
/// the gesture began, so repeated gestures compose instead of snapping back.
struct Lab5023: View {
    // 1. Create some state to store the transform at the start of a gesture
    @State private var entityTransformAtStartOfGesture: Transform?

    var body: some View {
        RealityView { content in
            if let root = try? await Entity(named: "GestureLab", in: realityKitContentBundle) {
                root.position = [0, -0.45, 0]
                if let subject = root.findEntity(named: "Cube") {
                    subject.components.set(HoverEffectComponent())
                }
                content.add(root)
            }
        }
        .gesture(rotateGesture)
    }

    /// Y-axis-constrained rotation applied on top of the stashed start transform.
    var rotateGesture: some Gesture {
        // I'm using constrainedToAxis to rotate around the Y axis only, but this is not required
        RotateGesture3D(constrainedToAxis: .y)
            .targetedToAnyEntity()
            .onChanged { value in
                // 2. Stash the current transform in state on the first change event
                if entityTransformAtStartOfGesture == nil {
                    entityTransformAtStartOfGesture = value.entity.transform
                }
                // Bind the stashed transform instead of force-unwrapping it below.
                guard let startTransform = entityTransformAtStartOfGesture else { return }
                // 3. Create a new transform based on the gesture rotation value
                let rotationTransform = Transform(AffineTransform3D(rotation: value.rotation))
                // 4. Multiply the stashed rotation by the rotation of the new transform
                value.entity.transform.rotation = startTransform.rotation * rotationTransform.rotation
            }
            .onEnded { _ in
                // Clear the stashed value so the next gesture re-captures the transform
                entityTransformAtStartOfGesture = nil
            }
    }
}
Lab 5024
Using MagnifyGesture to scale 3D content.
Scale an entity on all axes based on the initial scale.
/// Lab 5024: Use `MagnifyGesture` to scale 3D content.
///
/// Scales an entity uniformly on all axes relative to the scale it had when
/// the gesture began, clamped to [0.1, 1], and raises it on Y so it stays on
/// top of the platform.
struct Lab5024: View {
    // 1. Create some state to store the transform at the start of a gesture
    @State private var entityTransformAtStartOfGesture: Transform?

    var body: some View {
        RealityView { content in
            if let root = try? await Entity(named: "GestureLab", in: realityKitContentBundle) {
                root.position = [0, -0.45, 0]
                if let subject = root.findEntity(named: "Cube") {
                    subject.components.set(HoverEffectComponent())
                }
                content.add(root)
            }
        }
        .gesture(scaleGesture)
    }

    /// Uniform magnify-to-scale with clamping and a Y offset to keep the entity on the platform.
    var scaleGesture: some Gesture {
        MagnifyGesture()
            .targetedToAnyEntity()
            .onChanged { value in
                // 2. Stash the current transform in state on the first change event
                if entityTransformAtStartOfGesture == nil {
                    entityTransformAtStartOfGesture = value.entity.transform
                }
                // 3. Get the initial scale. Using a single axis since scaling is uniform.
                // Also guard the parent instead of force-unwrapping it below.
                guard let initialScale = entityTransformAtStartOfGesture?.scale.x,
                      let parent = value.entity.parent else { return }
                // 4. Multiply the gesture magnification by the initial scale
                let scaler = Float(value.magnification) * initialScale
                // 5. Clamp the scaled value within a range
                // (scaler is already Float — no redundant conversions needed)
                let minScale: Float = 0.1
                let maxScale: Float = 1
                let scaled = min(max(scaler, minScale), maxScale)
                // 6. Create a vector for the new scale
                let newScale = SIMD3(x: scaled, y: scaled, z: scaled)
                // 7. Apply the new scale to the entity
                value.entity.setScale(newScale, relativeTo: parent)
                // Bonus: move the entity based on scale to keep it on top of the platform
                value.entity.position.y = scaled / 10
            }
            .onEnded { _ in
                // Clear the stashed value so the next gesture re-captures the transform
                entityTransformAtStartOfGesture = nil
            }
    }
}
Lab 5025
Combine RotateGesture3D and MagnifyGesture.
Simultaneously perform rotation and scaling using SwiftUI gestures. This works well but could use some fine tuning.
/// Lab 5025: Combine `RotateGesture3D` and `MagnifyGesture`.
///
/// Simultaneously rotates (around Y) and uniformly scales the targeted
/// entity, both relative to the transform stashed when the gesture began.
struct Lab5025: View {
    // Transform captured on the first change event of each gesture.
    @State private var entityTransformAtStartOfGesture: Transform?

    var body: some View {
        RealityView { content in
            if let root = try? await Entity(named: "GestureLab", in: realityKitContentBundle) {
                root.position = [0, -0.45, 0]
                if let subject = root.findEntity(named: "Cube") {
                    subject.components.set(HoverEffectComponent())
                }
                content.add(root)
            }
        }
        .gesture(rotateScaleGesture)
    }

    /// Simultaneous rotate + magnify; both components read from the stashed start transform.
    var rotateScaleGesture: some Gesture {
        RotateGesture3D(constrainedToAxis: .y)
            .simultaneously(with: MagnifyGesture())
            .targetedToAnyEntity()
            .onChanged { value in
                if entityTransformAtStartOfGesture == nil {
                    entityTransformAtStartOfGesture = value.entity.transform
                }
                // Bind everything once — start transform, both gesture components,
                // and the parent — instead of force-unwrapping below.
                guard let startTransform = entityTransformAtStartOfGesture,
                      let rotation = value.first?.rotation,
                      let magnification = value.second?.magnification,
                      let parent = value.entity.parent else { return }
                // Rotation: compose the gesture rotation onto the stashed rotation
                let rotationTransform = Transform(AffineTransform3D(rotation: rotation))
                value.entity.transform.rotation = startTransform.rotation * rotationTransform.rotation
                // Scaling: uniform, relative to the stashed X scale, clamped to [0.1, 1]
                let scaler = Float(magnification) * startTransform.scale.x
                let minScale: Float = 0.1
                let maxScale: Float = 1
                let scaled = min(max(scaler, minScale), maxScale)
                let newScale = SIMD3(x: scaled, y: scaled, z: scaled)
                value.entity.setScale(newScale, relativeTo: parent)
                // Keep the entity resting on top of the platform as it scales
                value.entity.position.y = scaled / 10
            }
            .onEnded { _ in
                // Clear the stash so the next gesture re-captures the transform
                entityTransformAtStartOfGesture = nil
            }
    }
}