📱 React Native example with react-native-vision-camera
How do you use the tracking pixel in a React Native application?
Based on the schema above, you need to call our URL https://app.posetracker.com/pose_tracker/pixel through a WebView.
Based on this, here is a tutorial on using the pixel in a mobile app:
Start a new Expo project
npx create-expo-app my-app
cd my-app
yarn install
Install the dependencies for the WebView, the camera, and the canvas overlay
npx expo install react-native-webview react-native-vision-camera react-native-canvas
Create a development build and run it on your phone. You cannot use Expo Go here because react-native-vision-camera requires native code.
Integrate PoseTracker
Define your App.js and set your API_KEY:
import React, { useState, useEffect, useRef } from 'react';
import { StyleSheet, View, Dimensions, Text } from 'react-native';
import { Camera, useCameraDevices } from 'react-native-vision-camera';
import { WebView } from 'react-native-webview';
import Canvas from 'react-native-canvas';
export default function App() {
const [cameraPermission, setCameraPermission] = useState(false);
const cameraRef = useRef(null);
const webviewRef = useRef(null);
const canvasRef = useRef(null);
const devices = useCameraDevices();
const device = devices.front;
useEffect(() => {
const getPermissions = async () => {
const status = await Camera.requestCameraPermission();
setCameraPermission(status === 'authorized');
};
getPermissions();
}, []);
useEffect(() => {
if (cameraPermission && cameraRef.current) {
const intervalId = setInterval(() => {
sendFrameToWebView();
}, 100); // Adjust as needed
return () => clearInterval(intervalId);
}
}, [cameraPermission]);
const handleCanvas = (canvas) => {
if (canvas) {
const ctx = canvas.getContext('2d');
canvasRef.current = ctx;
canvas.width = Dimensions.get('window').width;
canvas.height = Dimensions.get('window').width;
}
};
const drawKeypoints = (keypoints) => {
const ctx = canvasRef.current;
if (ctx) {
ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height);
keypoints.forEach(keypoint => {
if (keypoint.score > 0.5) {
const { x, y } = keypoint;
ctx.beginPath();
ctx.arc(x, y, 5, 0, 2 * Math.PI);
ctx.fillStyle = 'red';
ctx.fill();
}
});
}
};
const sendFrameToWebView = (frame) => {
// This is where you can access the frame buffer from the Camera.
// React Native Vision Camera doesn't directly allow you to pull frames like a snapshot,
// but you can create a custom plugin to send frame data in real-time.
// The actual implementation would require native modules for processing.
// For now, assume we have frame data and send it to the WebView as a base64 string. MA
webviewRef.current.postMessage(
JSON.stringify({
type: 'posetracker_image',
// NEED TO BE ADAPT TO PASS A base64 string
image: frame,
})
);
};
if (!device) return <View style={styles.container}><Text>No camera devices found</Text></View>;
return (
<View style={styles.container}>
{cameraPermission && (
<Camera
ref={cameraRef}
style={styles.camera}
device={device}
isActive={true}
frameProcessor={sendFrameToWebView}
/>
)}
<Canvas ref={handleCanvas} style={styles.canvas} />
<WebView
ref={webviewRef}
style={styles.webview}
source={{ uri: 'https://app.posetracker.com/pose_tracker/pixel?token=YOUR_API_TOKEN&exercise=squat' }}
onMessage={(event) => {
const data = JSON.parse(event.nativeEvent.data);
if (data.type === 'keypoints') {
drawKeypoints(data.data.keypoints);
}
}}
/>
</View>
);
}
// Layout: a square camera preview (window width × window width) with the
// canvas overlay absolutely positioned on top, and the WebView filling the
// remaining vertical space below.
const window = Dimensions.get('window');

const styles = StyleSheet.create({
  container: {
    flex: 1,
    backgroundColor: '#fff',
    justifyContent: 'center',
  },
  camera: {
    width: window.width,
    height: window.width,
  },
  canvas: {
    position: 'absolute',
    width: window.width,
    height: window.width,
  },
  webview: {
    width: window.width,
    height: window.height - window.width,
  },
});
Last updated