Skip to main content

Inline File Input

Inline File Input Support Across Providers

Javelin provides a unified interface for working with inline file input (base64-encoded or inline reading) across all supported AI providers.

This allows developers to seamlessly pass files (documents, images, media, etc.) to models without needing provider-specific handling.

For details on supported file types and provider-specific limitations, please refer to the official documentation linked below.


| Provider | Inline File Input Support | Official Documentation |
| --- | --- | --- |
| OpenAI | ✅ Base64 inline | OpenAI File Guide |
| Azure OpenAI | ✅ Responses API supports base64 inline | Azure Responses API |
| Anthropic | ✅ Base64 inline | Anthropic File Handling |
| Amazon Bedrock | ✅ Inline reading | Bedrock Example |
| Gemini | ✅ Inline reading | Gemini Inline Files |

note

While Javelin ensures a consistent API experience, the exact file formats and size limits are defined by each provider. Please review their official documentation for the most up-to-date details.

Example: Inline File Input via Javelin Unified Endpoint

OpenAI
import requests, os, base64, mimetypes
# Config
# Javelin unified Responses endpoint; replace the domain with your deployment.
JAVELIN_OPEN_API = "https://your-api-domain.com/v1/responses"
# Provider key is read from the environment so it never lives in source.
OPENAI_KEY = os.getenv("OPENAI_KEY")
JAVELIN_API_KEY = os.getenv("JAVELIN_API_KEY")

# Local path of the PDF to send inline (base64-encoded) with the request.
FILE_PATH = "/your/path/to/pdf-file"

def get_base64(file_path):
    """Read *file_path* and return its contents as a base64-encoded UTF-8 string.

    The original snippet lost its indentation in the docs pipeline, which made
    it a SyntaxError; this restores a valid, runnable definition.
    """
    with open(file_path, "rb") as f:
        data = f.read()
    return base64.b64encode(data).decode("utf-8")

base64_string = get_base64(FILE_PATH)

# Responses API payload: one user turn carrying a text question plus the PDF
# embedded as a data URL (MIME type + base64 body).
payload = {
    "model": "gpt-5",
    "input": [
        {
            "role": "user",
            "content": [
                {"type": "input_text", "text": "What is this file about?"},
                {
                    "type": "input_file",
                    "file_data": f"data:application/pdf;base64,{base64_string}",
                    # The original had a bare `<your-file-name>` placeholder,
                    # which is not valid Python; derive the label from the path.
                    "filename": os.path.basename(FILE_PATH),
                },
            ],
        }
    ],
}

headers = {
    "Authorization": f"Bearer {OPENAI_KEY}",
    "Content-Type": "application/json",
    "x-javelin-apikey": JAVELIN_API_KEY,
    "x-javelin-route": "<your-javelin-unified-route>",
}

resp = requests.post(JAVELIN_OPEN_API, headers=headers, json=payload)
resp.raise_for_status()
print("\n=== Model Output ===")
# Fall back to the full JSON body when `output_text` is absent.
print(resp.json().get("output_text", resp.json()))
Azure OpenAI
import requests, os, base64, mimetypes

# Config
# Javelin unified endpoint for Azure OpenAI Responses; the api-version query
# parameter is required by Azure.
JAVELIN_AZURE_API = "https://your-api-domain.com/v1/openai/responses?api-version=2025-04-01-preview"
# Keys come from the environment so they never live in source.
AZURE_OPENAI_KEY = os.getenv("AZURE_OPENAI_KEY")
JAVELIN_API_KEY = os.getenv("JAVELIN_API_KEY")

# Local path of the PDF to send inline (base64-encoded) with the request.
FILE_PATH = "/your/path/to/pdf-file"

def get_base64(file_path):
    """Read *file_path* and return its contents as a base64-encoded UTF-8 string.

    Restores the indentation lost in the docs pipeline (the flattened body was
    a SyntaxError).
    """
    with open(file_path, "rb") as f:
        data = f.read()
    return base64.b64encode(data).decode("utf-8")

base64_string = get_base64(FILE_PATH)

# The original referenced undefined `mime_type` / `filename` variables;
# derive both from the file path so the snippet actually runs.
mime_type, _ = mimetypes.guess_type(FILE_PATH)
mime_type = mime_type or "application/pdf"  # fallback for unrecognized extensions
filename = os.path.basename(FILE_PATH)

# Responses API payload: a text question plus the file embedded as a data URL.
payload = {
    "model": "gpt-5",
    "input": [
        {
            "role": "user",
            "content": [
                {"type": "input_text", "text": "What is this file about?"},
                {
                    "type": "input_file",
                    "file_data": f"data:{mime_type};base64,{base64_string}",
                    "filename": filename,
                },
            ],
        }
    ],
}

headers = {
    # Azure OpenAI authenticates with an `Api-Key` header, not a Bearer token.
    "Api-Key": f"{AZURE_OPENAI_KEY}",
    "Content-Type": "application/json",
    "x-javelin-apikey": JAVELIN_API_KEY,
    "x-javelin-route": "your-javelin-unified-route",
}

resp = requests.post(JAVELIN_AZURE_API, headers=headers, json=payload)
resp.raise_for_status()
print("\n=== Model Output ===")
# Fall back to the full JSON body when `output_text` is absent.
print(resp.json().get("output_text", resp.json()))
Amazon Bedrock
note

Amazon Bedrock's Converse API accepts raw document bytes when called through the AWS SDKs. When calling the HTTP endpoint directly with a JSON payload, however, the document bytes must be base64-encoded, because raw bytes are not JSON-serializable. The example below base64-encodes the file before sending it.

import requests, os, base64

# Bedrock model identifier, interpolated into the Converse endpoint path below.
model_id = "anthropic.claude-3-haiku-20240307-v1:0"

# Config
# Javelin unified endpoint for the Bedrock Converse API.
JAVELIN_AMAZON_API = f"https://your-api-domain.com/v1/model/{model_id}/converse"
JAVELIN_API_KEY = os.getenv("JAVELIN_API_KEY")

# Local path of the PDF to attach to the request.
FILE_PATH = "/your/path/to/pdf-file"

def get_doc_bytes(file_path):
    """Read *file_path* from disk and return its raw bytes.

    Restores the indentation lost in the docs pipeline (the flattened body was
    a SyntaxError).
    """
    with open(file_path, "rb") as f:
        return f.read()

doc_byte = get_doc_bytes(FILE_PATH)

# Converse API payload. NOTE: raw Python bytes are not JSON-serializable, so
# passing `doc_byte` directly to `requests.post(json=...)` raises TypeError.
# The Converse HTTP API expects blob fields as base64-encoded strings (the AWS
# SDKs do this encoding for you when you pass raw bytes).
payload = {
    "messages": [
        {
            "role": "user",
            "content": [
                {
                    "text": "Briefly compare the models described in this document",
                },
                {
                    "document": {
                        "format": "pdf",
                        "name": "Amazon Nova Service Cards",
                        "source": {
                            "bytes": base64.b64encode(doc_byte).decode("utf-8")
                        },
                    }
                },
            ],
        }
    ]
}

headers = {
    "Content-Type": "application/json",
    "x-javelin-apikey": JAVELIN_API_KEY,
    "x-javelin-route": "<your-javelin-unified-route>",
}

resp = requests.post(JAVELIN_AMAZON_API, headers=headers, json=payload)
resp.raise_for_status()
print("\n=== Model Output ===")
# Converse responses nest the reply under output.message.content (there is no
# top-level `output_text` key); fall back to the raw body for debugging.
body = resp.json()
print(body.get("output", {}).get("message", {}).get("content", body))