Complete Integration Example
This guide demonstrates a complete end-to-end integration with the Yeino API, from uploading a recording to retrieving transcripts and insights.
Overview
This example covers:
- Creating a recording - Initiate upload and get pre-signed URL
- Uploading the file - Upload directly to Azure Blob Storage
- Completing the upload - Verify upload and trigger processing
- Waiting for processing - Poll for completion status
- Retrieving transcript - Get full transcript and summary
- Retrieving insights - Get AI-generated insights
Prerequisites
- API key with `read_write` permission
- Project ID from your Yeino organization
- A video, audio, or text file to upload
Complete JavaScript Example
// Base URL for all Yeino API v2 endpoints.
const API_BASE_URL = 'https://api.yeino.com/v2';
// API key with read_write permission (never expose API keys in client-side code).
const API_KEY = 'yno_live_your_api_key_here';
// Target project ID from your Yeino organization.
const PROJECT_ID = '507f1f77bcf86cd799439011';
/**
 * Step 1: Initiate recording upload.
 *
 * POSTs the recording metadata to the project's recordings endpoint and
 * returns the API response, which includes the recording ID and a
 * pre-signed Azure upload URL.
 *
 * @param {File} file - Browser File object selected by the user.
 * @param {string} projectId - Yeino project to attach the recording to.
 * @returns {Promise<Object>} Parsed JSON response ({ recordingId, uploadUrl, ... }).
 * @throws {Error} When the API responds with a non-2xx status.
 */
async function initiateUpload(file, projectId) {
  const endpoint = `${API_BASE_URL}/projects/${projectId}/recordings`;
  const payload = {
    name: file.name.replace(/\.[^/.]+$/, ''), // Remove extension
    description: 'Uploaded via API integration',
    filename: file.name,
    mimeType: file.type,
    fileSize: file.size,
  };
  const response = await fetch(endpoint, {
    method: 'POST',
    headers: {
      'x-api-key': API_KEY,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify(payload),
  });
  if (!response.ok) {
    const error = await response.json();
    throw new Error(`Failed to initiate upload: ${error.message}`);
  }
  return response.json();
}
/**
 * Step 2: Upload file to Azure Blob Storage.
 *
 * PUTs the raw file bytes directly to the pre-signed URL returned by
 * step 1 (no Yeino API key needed — the URL itself is the credential).
 *
 * @param {string} uploadUrl - Pre-signed Azure Blob Storage URL.
 * @param {File} file - File whose bytes are uploaded.
 * @returns {Promise<boolean>} Resolves true on success.
 * @throws {Error} When Azure rejects the upload.
 */
async function uploadFile(uploadUrl, file) {
  const headers = {
    'x-ms-blob-type': 'BlockBlob',
    'Content-Type': file.type,
  };
  const response = await fetch(uploadUrl, { method: 'PUT', headers, body: file });
  if (!response.ok) {
    throw new Error(`Failed to upload file: ${response.statusText}`);
  }
  return true;
}
/**
 * Step 3: Complete recording upload.
 *
 * Notifies the API that the blob upload finished; the server verifies
 * the upload and queues AI processing.
 *
 * @param {string} recordingId - Recording returned by initiateUpload.
 * @returns {Promise<Object>} Parsed JSON response.
 * @throws {Error} When the API responds with a non-2xx status.
 */
async function completeUpload(recordingId) {
  const endpoint = `${API_BASE_URL}/recordings/${recordingId}/complete`;
  const response = await fetch(endpoint, {
    method: 'POST',
    headers: {
      'x-api-key': API_KEY,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({}),
  });
  if (!response.ok) {
    const error = await response.json();
    throw new Error(`Failed to complete upload: ${error.message}`);
  }
  return response.json();
}
/**
 * Step 4: Wait for processing to complete.
 *
 * Polls the status endpoint until the recording reaches a terminal
 * state ('completed' or 'failed') or the timeout elapses.
 *
 * @param {string} recordingId - Recording to poll.
 * @param {Object} [options]
 * @param {number} [options.maxWaitTime=3600000] - Overall timeout in ms (1 hour).
 * @param {number} [options.pollInterval=5000] - Delay between polls in ms.
 * @param {?Function} [options.onProgress] - Invoked with each status payload.
 * @returns {Promise<Object>} The final status payload once completed.
 * @throws {Error} On timeout, a failed status fetch, or failed processing.
 */
async function waitForProcessing(recordingId, options = {}) {
  const {
    maxWaitTime = 3600000, // 1 hour default
    pollInterval = 5000, // 5 seconds default
    onProgress = null,
  } = options;
  const startedAt = Date.now();
  for (;;) {
    // Bail out once the overall time budget is spent.
    if (Date.now() - startedAt > maxWaitTime) {
      throw new Error('Processing timeout exceeded');
    }
    const response = await fetch(
      `${API_BASE_URL}/recordings/${recordingId}/status`,
      { headers: { 'x-api-key': API_KEY } }
    );
    if (!response.ok) {
      const error = await response.json();
      throw new Error(`Failed to get status: ${error.message}`);
    }
    const status = await response.json();
    // Report progress to the caller, if a callback was supplied.
    if (onProgress) {
      onProgress(status);
    }
    if (status.status === 'completed') {
      return status;
    }
    if (status.status === 'failed') {
      throw new Error(
        `Processing failed: ${status.processingError?.message || 'Unknown error'}`
      );
    }
    // Sleep before the next poll.
    await new Promise((resolve) => setTimeout(resolve, pollInterval));
  }
}
/**
 * Step 5: Get full recording with transcript.
 *
 * Fetches the complete recording resource, including the transcript
 * and transcript summary.
 *
 * @param {string} recordingId - Recording to fetch.
 * @returns {Promise<Object>} Parsed JSON recording resource.
 * @throws {Error} When the API responds with a non-2xx status.
 */
async function getRecording(recordingId) {
  const response = await fetch(`${API_BASE_URL}/recordings/${recordingId}`, {
    headers: { 'x-api-key': API_KEY },
  });
  if (!response.ok) {
    const error = await response.json();
    throw new Error(`Failed to get recording: ${error.message}`);
  }
  return response.json();
}
/**
 * Step 6: Get insights for the recording.
 *
 * Retrieves AI-generated insights filtered to a single recording.
 *
 * Fix: the recording ID is added via URL/searchParams instead of raw
 * string interpolation, so IDs containing reserved URL characters are
 * percent-encoded correctly instead of corrupting the query string.
 *
 * @param {string} projectId - Project the insights belong to.
 * @param {string} recordingId - Recording used to filter insights.
 * @returns {Promise<Object>} Parsed JSON response ({ data, total, ... }).
 * @throws {Error} When the API responds with a non-2xx status.
 */
async function getRecordingInsights(projectId, recordingId) {
  const url = new URL(
    `${API_BASE_URL}/projects/${encodeURIComponent(projectId)}/insights`
  );
  url.searchParams.set('recordingId', recordingId);
  const response = await fetch(url, {
    headers: { 'x-api-key': API_KEY },
  });
  if (!response.ok) {
    const error = await response.json();
    throw new Error(`Failed to get insights: ${error.message}`);
  }
  return await response.json();
}
/**
 * Complete workflow: Upload and process a recording.
 *
 * Runs steps 1-6 end to end: create the recording, upload the bytes to
 * Azure, trigger processing, wait for completion, then fetch the
 * transcript and insights.
 *
 * @param {File} file - File to upload.
 * @param {string} projectId - Target Yeino project.
 * @returns {Promise<{recording: Object, insights: Object}>}
 * @throws {Error} Re-thrown from any failing step (after logging it).
 */
async function uploadAndProcessRecording(file, projectId) {
  try {
    console.log('Step 1: Initiating upload...');
    const { recordingId, uploadUrl } = await initiateUpload(file, projectId);
    console.log(`Recording created: ${recordingId}`);

    console.log('Step 2: Uploading file to Azure...');
    await uploadFile(uploadUrl, file);
    console.log('File uploaded successfully');

    console.log('Step 3: Completing upload...');
    await completeUpload(recordingId);
    console.log('Upload completed, processing started');

    console.log('Step 4: Waiting for processing...');
    await waitForProcessing(recordingId, {
      onProgress: (status) => console.log(`Status: ${status.status}`),
    });
    console.log('Processing completed!');

    console.log('Step 5: Retrieving transcript...');
    const recording = await getRecording(recordingId);
    console.log('Transcript retrieved');

    console.log('Step 6: Retrieving insights...');
    const insights = await getRecordingInsights(projectId, recordingId);
    console.log(`Found ${insights.total} insights`);

    return { recording, insights };
  } catch (error) {
    console.error('Error:', error.message);
    throw error;
  }
}
// Usage example
const fileInput = document.querySelector('input[type="file"]');
fileInput.addEventListener('change', async (event) => {
const file = event.target.files[0];
if (!file) return;
try {
const result = await uploadAndProcessRecording(file, PROJECT_ID);
// Display transcript summary
console.log('Summary:', result.recording.transcriptSummary?.summary);
console.log('Key Topics:', result.recording.transcriptSummary?.keyTopics);
// Display insights
result.insights.data.forEach((insight) => {
console.log(`- ${insight.title} (${insight.priority})`);
});
} catch (error) {
console.error('Upload failed:', error);
}
});Python Example
import mimetypes
import os
import time
from typing import Any, Callable, Dict, Optional

import requests
# Base URL for all Yeino API v2 endpoints.
API_BASE_URL = "https://api.yeino.com/v2"
# API key with read_write permission (replace with your own key).
API_KEY = "yno_live_your_api_key_here"
# Target project ID from your Yeino organization.
PROJECT_ID = "507f1f77bcf86cd799439011"
def initiate_upload(file_path: str, project_id: str) -> Dict[str, Any]:
    """Step 1: Initiate recording upload.

    Creates the recording record and returns the API response containing
    the recording ID and a pre-signed Azure upload URL.

    Fixes over the original example:
    - ``os.path.getsize()`` instead of reading the whole file into memory
      just to measure its length,
    - ``os.path.basename()`` / ``os.path.splitext()`` so paths work on any
      OS and any extension is stripped from the display name (not just
      .mp4/.mp3),
    - the ``mimetypes`` module (as the original TODO comment suggested)
      instead of a hard-coded .mp4/.mp3 guess.

    Raises:
        requests.HTTPError: If the API responds with a non-2xx status.
        OSError: If the file cannot be stat'ed.
    """
    file_name = os.path.basename(file_path)
    mime_type, _ = mimetypes.guess_type(file_name)
    if mime_type is None:
        # Unknown extension: fall back to a generic binary type.
        mime_type = "application/octet-stream"
    response = requests.post(
        f"{API_BASE_URL}/projects/{project_id}/recordings",
        headers={
            "x-api-key": API_KEY,
            "Content-Type": "application/json",
        },
        json={
            "name": os.path.splitext(file_name)[0],
            "description": "Uploaded via API integration",
            "filename": file_name,
            "mimeType": mime_type,
            "fileSize": os.path.getsize(file_path),
        },
    )
    response.raise_for_status()
    return response.json()
def upload_file(upload_url: str, file_path: str) -> None:
    """Step 2: Upload file to Azure Blob Storage.

    Fixes over the original example:
    - streams the open file object to ``requests`` instead of loading the
      entire file into memory first (large recordings no longer need to
      fit in RAM),
    - derives the Content-Type with the ``mimetypes`` module instead of a
      hard-coded .mp4/.mp3 guess.

    Raises:
        requests.HTTPError: If Azure rejects the upload.
        OSError: If the file cannot be opened.
    """
    mime_type, _ = mimetypes.guess_type(file_path)
    if mime_type is None:
        mime_type = "application/octet-stream"
    with open(file_path, 'rb') as f:
        # Passing the file object makes requests stream the body.
        response = requests.put(
            upload_url,
            headers={
                "x-ms-blob-type": "BlockBlob",
                "Content-Type": mime_type,
            },
            data=f,
        )
    response.raise_for_status()
def complete_upload(recording_id: str) -> Dict[str, Any]:
    """Step 3: Complete recording upload.

    Tells the API the blob upload finished; the server verifies the
    upload and queues AI processing.

    Raises:
        requests.HTTPError: If the API responds with a non-2xx status.
    """
    url = f"{API_BASE_URL}/recordings/{recording_id}/complete"
    headers = {
        "x-api-key": API_KEY,
        "Content-Type": "application/json",
    }
    response = requests.post(url, headers=headers, json={})
    response.raise_for_status()
    return response.json()
def wait_for_processing(
    recording_id: str,
    max_wait_time: int = 3600,
    poll_interval: int = 5,
    on_progress: Optional[Callable[[Dict[str, Any]], None]] = None,
) -> Dict[str, Any]:
    """Step 4: Wait for processing to complete.

    Polls the status endpoint until the recording reaches a terminal
    state ('completed' or 'failed') or the timeout elapses.

    Fix: the ``on_progress`` annotation was ``Optional[callable]``, which
    uses the builtin function rather than a type and is rejected by type
    checkers; it is now ``Optional[Callable[[Dict[str, Any]], None]]``.

    Args:
        recording_id: Recording to poll.
        max_wait_time: Overall timeout in seconds (default: 1 hour).
        poll_interval: Seconds to sleep between polls.
        on_progress: Optional callback invoked with each status payload.

    Returns:
        The final status payload once ``status == "completed"``.

    Raises:
        TimeoutError: If ``max_wait_time`` elapses first.
        Exception: If the recording enters the ``failed`` state.
        requests.HTTPError: If a status fetch returns a non-2xx status.
    """
    start_time = time.time()
    while True:
        # Bail out once the overall time budget is spent.
        if time.time() - start_time > max_wait_time:
            raise TimeoutError("Processing timeout exceeded")
        response = requests.get(
            f"{API_BASE_URL}/recordings/{recording_id}/status",
            headers={"x-api-key": API_KEY},
        )
        response.raise_for_status()
        status = response.json()
        if on_progress:
            on_progress(status)
        if status["status"] == "completed":
            return status
        if status["status"] == "failed":
            error_msg = status.get("processingError", {}).get("message", "Unknown error")
            raise Exception(f"Processing failed: {error_msg}")
        time.sleep(poll_interval)
def get_recording(recording_id: str) -> Dict[str, Any]:
    """Step 5: Get full recording with transcript.

    Fetches the complete recording resource, including the transcript
    and transcript summary.

    Raises:
        requests.HTTPError: If the API responds with a non-2xx status.
    """
    url = f"{API_BASE_URL}/recordings/{recording_id}"
    response = requests.get(url, headers={"x-api-key": API_KEY})
    response.raise_for_status()
    return response.json()
def get_recording_insights(project_id: str, recording_id: str) -> Dict[str, Any]:
    """Step 6: Get insights for the recording.

    Lists the project's AI-generated insights filtered to one recording
    via the ``recordingId`` query parameter.

    Raises:
        requests.HTTPError: If the API responds with a non-2xx status.
    """
    url = f"{API_BASE_URL}/projects/{project_id}/insights"
    query = {"recordingId": recording_id}
    response = requests.get(url, headers={"x-api-key": API_KEY}, params=query)
    response.raise_for_status()
    return response.json()
def upload_and_process_recording(file_path: str, project_id: str) -> Dict[str, Any]:
    """Complete workflow: Upload and process a recording.

    Runs steps 1-6 end to end and returns the recording (with
    transcript) together with its insights.
    """
    print("Step 1: Initiating upload...")
    init = initiate_upload(file_path, project_id)
    recording_id, upload_url = init["recordingId"], init["uploadUrl"]
    print(f"Recording created: {recording_id}")

    print("Step 2: Uploading file to Azure...")
    upload_file(upload_url, file_path)
    print("File uploaded successfully")

    print("Step 3: Completing upload...")
    complete_upload(recording_id)
    print("Upload completed, processing started")

    print("Step 4: Waiting for processing...")
    wait_for_processing(
        recording_id,
        on_progress=lambda status: print(f"Status: {status['status']}"),
    )
    print("Processing completed!")

    print("Step 5: Retrieving transcript...")
    recording = get_recording(recording_id)
    print("Transcript retrieved")

    print("Step 6: Retrieving insights...")
    insights = get_recording_insights(project_id, recording_id)
    print(f"Found {insights['total']} insights")

    return {"recording": recording, "insights": insights}
# Usage example
if __name__ == "__main__":
try:
result = upload_and_process_recording("interview.mp4", PROJECT_ID)
# Display transcript summary
summary = result["recording"].get("transcriptSummary", {})
print("\nSummary:", summary.get("summary"))
print("Key Topics:", summary.get("keyTopics", []))
# Display insights
print("\nInsights:")
for insight in result["insights"]["data"]:
print(f"- {insight['title']} ({insight['priority']})")
except Exception as e:
print(f"Error: {e}")cURL Example
For testing or simple integrations, here’s the complete workflow using cURL:
#!/bin/bash
# Complete Yeino upload-and-process workflow using cURL + jq.
#
# Fixes over the original:
# - portable file-size lookup: GNU stat (Linux) uses -c%s, BSD stat
#   (macOS) uses -f%z; the original -f%z form fails on Linux,
# - quoted variable expansions when piping JSON into jq, so responses
#   containing whitespace or globs are not mangled by word splitting,
# - `set -euo pipefail` so a failing step aborts the script instead of
#   continuing with empty variables.
set -euo pipefail

API_BASE_URL="https://api.yeino.com/v2"
API_KEY="yno_live_your_api_key_here"
PROJECT_ID="507f1f77bcf86cd799439011"
FILE_PATH="interview.mp4"

# GNU stat first, BSD stat as fallback.
FILE_SIZE=$(stat -c%s "$FILE_PATH" 2>/dev/null || stat -f%z "$FILE_PATH")

# Step 1: Initiate upload
echo "Step 1: Initiating upload..."
INIT_RESPONSE=$(curl -s -X POST \
  "${API_BASE_URL}/projects/${PROJECT_ID}/recordings" \
  -H "x-api-key: ${API_KEY}" \
  -H "Content-Type: application/json" \
  -d "{
    \"name\": \"User Interview\",
    \"filename\": \"interview.mp4\",
    \"mimeType\": \"video/mp4\",
    \"fileSize\": ${FILE_SIZE}
  }")
RECORDING_ID=$(echo "$INIT_RESPONSE" | jq -r '.recordingId')
UPLOAD_URL=$(echo "$INIT_RESPONSE" | jq -r '.uploadUrl')
echo "Recording ID: $RECORDING_ID"

# Step 2: Upload file directly to Azure Blob Storage
echo "Step 2: Uploading file..."
curl -X PUT "$UPLOAD_URL" \
  -H "x-ms-blob-type: BlockBlob" \
  -H "Content-Type: video/mp4" \
  --data-binary "@$FILE_PATH"
echo "File uploaded"

# Step 3: Complete upload (triggers AI processing)
echo "Step 3: Completing upload..."
curl -s -X POST \
  "${API_BASE_URL}/recordings/${RECORDING_ID}/complete" \
  -H "x-api-key: ${API_KEY}" \
  -H "Content-Type: application/json" \
  -d '{}' > /dev/null
echo "Processing started"

# Step 4: Poll until processing reaches a terminal state
echo "Step 4: Waiting for processing..."
STATUS="processing"
while [ "$STATUS" != "completed" ]; do
  sleep 5
  STATUS_RESPONSE=$(curl -s \
    "${API_BASE_URL}/recordings/${RECORDING_ID}/status" \
    -H "x-api-key: ${API_KEY}")
  STATUS=$(echo "$STATUS_RESPONSE" | jq -r '.status')
  echo "Status: $STATUS"
  if [ "$STATUS" = "failed" ]; then
    echo "Processing failed!"
    exit 1
  fi
done
echo "Processing completed!"

# Step 5: Get recording with transcript
echo "Step 5: Retrieving transcript..."
RECORDING=$(curl -s \
  "${API_BASE_URL}/recordings/${RECORDING_ID}" \
  -H "x-api-key: ${API_KEY}")
echo "Transcript retrieved"
echo "$RECORDING" | jq -r '.transcriptSummary.summary'

# Step 6: Get insights for this recording
echo "Step 6: Retrieving insights..."
INSIGHTS=$(curl -s \
  "${API_BASE_URL}/projects/${PROJECT_ID}/insights?recordingId=${RECORDING_ID}" \
  -H "x-api-key: ${API_KEY}")
TOTAL=$(echo "$INSIGHTS" | jq -r '.total')
echo "Found $TOTAL insights"
echo "$INSIGHTS" | jq -r '.data[] | "- \(.title) (\(.priority))"'
Error Handling
Always implement proper error handling in your integration:
async function uploadWithRetry(file, projectId, maxRetries = 3) {
for (let attempt = 1; attempt <= maxRetries; attempt++) {
try {
return await uploadAndProcessRecording(file, projectId);
} catch (error) {
if (attempt === maxRetries) {
throw error;
}
// Exponential backoff
const delay = Math.pow(2, attempt) * 1000;
console.log(`Retry ${attempt}/${maxRetries} after ${delay}ms...`);
await new Promise(resolve => setTimeout(resolve, delay));
}
}
}Best Practices
- Handle rate limits - Implement exponential backoff for 429 responses
- Validate file types - Check MIME type and file size before uploading
- Monitor processing - Use webhooks or polling to track status
- Store recording IDs - Keep track of recording IDs for later retrieval
- Error recovery - Implement retry logic for transient failures
- Progress tracking - Show upload and processing progress to users
- Secure API keys - Never expose API keys in client-side code
Next Steps
- Explore the Recordings API for more details on each endpoint
- Check the Insights API for filtering and pagination options
- Review Authentication for security best practices
Last updated on