Documentation Index
Fetch the complete documentation index at: https://docs.agentfront.dev/llms.txt
Use this file to discover all available pages before exploring further.
Interface
interface DirectClient {
// Tools
listTools (): Promise < FormattedTools >;
callTool ( name : string , args ?: Record < string , unknown >): Promise < FormattedToolResult >;
// Resources
listResources (): Promise < ListResourcesResult >;
readResource ( uri : string ): Promise < ReadResourceResult >;
listResourceTemplates (): Promise < ListResourceTemplatesResult >;
// Prompts
listPrompts (): Promise < ListPromptsResult >;
getPrompt ( name : string , args ?: Record < string , string >): Promise < GetPromptResult >;
// Skills
searchSkills ( query : string , options ?: SearchSkillsOptions ): Promise < SearchSkillsResult >;
loadSkills ( skillIds : string [], options ?: LoadSkillsOptions ): Promise < LoadSkillsResult >;
listSkills ( options ?: ListSkillsOptions ): Promise < ListSkillsResult >;
// Jobs
listJobs ( options ?: ListJobsOptions ): Promise < ListJobsResult >;
executeJob ( name : string , input ?: Record < string , unknown >, options ?: ExecuteJobOptions ): Promise < JobExecutionResult >;
getJobStatus ( runId : string ): Promise < JobStatusResult >;
// Workflows
listWorkflows ( options ?: ListWorkflowsOptions ): Promise < ListWorkflowsResult >;
executeWorkflow ( name : string , input ?: Record < string , unknown >, options ?: ExecuteWorkflowOptions ): Promise < WorkflowExecutionResult >;
getWorkflowStatus ( runId : string ): Promise < WorkflowStatusResult >;
// Info
getSessionId (): string ;
getClientInfo (): ClientInfo ;
getServerInfo (): Implementation ;
getCapabilities (): ServerCapabilities ;
getDetectedPlatform (): LLMPlatform ;
// Lifecycle
close (): Promise < void >;
}
Connection Functions
connect(config, options?)
Create a generic DirectClient.
import { connect } from ' @frontmcp/sdk ' ;
const client = await connect ( config , {
clientInfo : { name : ' my-agent ' , version : ' 1.0.0 ' },
authToken : ' jwt-token ' ,
});
connectOpenAI(config, options?)
Create a client with OpenAI tool formatting.
import { connectOpenAI } from ' @frontmcp/sdk ' ;
const client = await connectOpenAI ( config );
const tools = await client . listTools ();
// tools are in OpenAI function format
connectClaude(config, options?)
Create a client with Anthropic Claude formatting.
import { connectClaude } from ' @frontmcp/sdk ' ;
const client = await connectClaude ( config );
const tools = await client . listTools ();
// tools are in Claude tool format
connectVercelAI(config, options?)
Create a client with Vercel AI SDK formatting.
import { connectVercelAI } from ' @frontmcp/sdk ' ;
const client = await connectVercelAI ( config );
const tools = await client . listTools ();
// tools are in Vercel AI format
Connection Options
interface ConnectOptions {
clientInfo ?: ClientInfo ;
session ?: SessionOptions ;
authToken ?: string ;
capabilities ?: Partial < ClientCapabilities >;
}
interface SessionOptions {
id ?: string ;
user ?: {
sub ?: string ;
name ?: string ;
email ?: string ;
[ key : string ]: unknown ;
};
}
interface ClientInfo {
name : string ;
version : string ;
}
Tool Operations
listTools()
List all available tools with platform-specific formatting.
const tools = await client . listTools ();
// FormattedTools depends on detected platform
callTool(name, args?)
Execute a tool and get a platform-formatted result.
const result = await client . callTool ( ' my_tool ' , {
input : ' test ' ,
count : 5 ,
});
Resource Operations
listResources()
List all static resources.
const { resources } = await client . listResources ();
resources . forEach ( r => console . log ( r . uri , r . name ));
readResource(uri)
Read a resource by URI.
const { contents } = await client . readResource ( ' config://app ' );
console . log ( contents [ 0 ]. text );
listResourceTemplates()
List all resource templates.
const { resourceTemplates } = await client . listResourceTemplates ();
resourceTemplates . forEach ( t => console . log ( t . uriTemplate ));
Prompt Operations
listPrompts()
List all prompts.
const { prompts } = await client . listPrompts ();
prompts . forEach ( p => console . log ( p . name , p . description ));
getPrompt(name, args?)
Get a prompt with arguments.
const { messages } = await client . getPrompt ( ' research ' , {
topic : ' AI safety ' ,
});
Skill Operations
searchSkills(query, options?)
Search for skills.
const { skills } = await client . searchSkills ( ' code review ' , {
limit : 5 ,
});
loadSkills(skillIds, options?)
Load skills into the session.
await client . loadSkills ([ ' review-pr ' ], {
activateSession : true ,
policyMode : ' strict ' ,
});
listSkills(options?)
List all available skills.
const { skills } = await client . listSkills ();
Job Operations
listJobs(options?)
List all available jobs.
const { jobs , count } = await client . listJobs ();
jobs . forEach ( j => console . log ( j . name , j . description ));
// Filter by tags
const filtered = await client . listJobs ({ tags : [ ' notification ' ] });
executeJob(name, input?, options?)
Execute a job by name. Supports background mode.
// Inline execution (waits for completion)
const result = await client . executeJob ( ' analyze-text ' , {
text : ' Hello world ' ,
language : ' en ' ,
});
console . log ( result . state ); // 'completed'
console . log ( result . result ); // { sentiment: 'positive', ... }
// Background execution (returns immediately with runId)
const { runId } = await client . executeJob ( ' analyze-text ' , {
text : ' Hello world ' ,
}, { background : true });
getJobStatus(runId)
Get the status of a job run.
const status = await client . getJobStatus ( runId );
console . log ( status . state ); // 'running' | 'completed' | 'failed'
console . log ( status . result ); // Job result (when completed)
console . log ( status . logs ); // Execution logs
console . log ( status . attempt ); // Current retry attempt
Workflow Operations
listWorkflows(options?)
List all available workflows.
const { workflows , count } = await client . listWorkflows ();
workflows . forEach ( w => console . log ( w . name , w . trigger , w . steps ?. length ));
executeWorkflow(name, input?, options?)
Execute a workflow by name. Supports background mode.
// Inline execution
const result = await client . executeWorkflow ( ' data-pipeline ' , {
source : ' api ' ,
});
console . log ( result . state ); // 'completed' | 'failed'
console . log ( result . stepResults ); // Per-step results
// Background execution
const { runId } = await client . executeWorkflow ( ' data-pipeline ' , {
source : ' api ' ,
}, { background : true });
getWorkflowStatus(runId)
Get the status of a workflow run with per-step details.
const status = await client . getWorkflowStatus ( runId );
console . log ( status . state ); // 'running' | 'completed' | 'failed'
console . log ( status . workflowName ); // Workflow name
console . log ( status . stepResults ); // { step1: { state, outputs }, ... }
Elicitation
onElicitation(handler)
Register a handler for elicitation requests. Returns an unsubscribe function.
const unsubscribe = client . onElicitation ( async ( request ) => {
console . log ( ' Server asking: ' , request . message );
// Get user input
const response = await getUserInput ( request . requestedSchema );
return {
action : ' accept ' ,
content : response ,
};
});
// Later: unsubscribe()
submitElicitationResult(elicitId, response)
Submit an elicitation response.
await client . submitElicitationResult ( ' elicit-123 ' , {
action : ' accept ' ,
content : { confirmed : true },
});
Lifecycle
close()
Close the client connection.
Usage Examples
import { connect } from ' @frontmcp/sdk ' ;
import config from ' ./server ' ;
describe ( ' Tools ' , () => {
let client : DirectClient ;
beforeAll ( async () => {
client = await connect ( config );
});
afterAll ( async () => {
await client . close ();
});
test ( ' list tools ' , async () => {
const tools = await client . listTools ();
expect ( tools . length ). toBeGreaterThan ( 0 );
});
test ( ' call tool ' , async () => {
const result = await client . callTool ( ' get_user ' , { userId : ' 123 ' });
expect ( result . id ). toBe ( ' 123 ' );
});
});
OpenAI Integration
import { connectOpenAI } from ' @frontmcp/sdk ' ;
import OpenAI from ' openai ' ;
const client = await connectOpenAI ( config );
const openai = new OpenAI ();
// Get tools in OpenAI format
const tools = await client . listTools ();
// Use with OpenAI
const completion = await openai . chat . completions . create ({
model : ' gpt-4 ' ,
messages : [{ role : ' user ' , content : ' List all users ' }],
tools ,
});
// Handle tool calls
for ( const toolCall of completion . choices [ 0 ]. message . tool_calls || []) {
const result = await client . callTool (
toolCall . function . name ,
JSON . parse ( toolCall . function . arguments )
);
// Continue conversation with result...
}
Skill Workflow
const client = await connect ( config );
// Search for relevant skills
const search = await client . searchSkills ( ' data analysis ' , { limit : 3 });
console . log ( ' Found skills: ' , search . skills . map ( s => s . name ));
// Load skill into session
const load = await client . loadSkills ([ search . skills [ 0 ]. id ], {
activateSession : true ,
policyMode : ' strict ' ,
});
// Now only skill's tools are available
const tools = await client . listTools ();
// tools are filtered to skill's allowlist
With Authentication
import { connect } from ' @frontmcp/sdk ' ;
import config from ' ./server ' ;
const client = await connect ( config , {
authToken : ' eyJ... ' , // JWT token
session : {
user : {
sub : ' user-123 ' ,
name : ' John Doe ' ,
email : ' john@example.com ' ,
},
},
});
// Tools will have access to auth context
const result = await client . callTool ( ' get_my_profile ' );
Scope Caching
Connection functions use WeakMap caching to ensure singleton scopes:
// Same config object = same scope
const client1 = await connect ( config );
const client2 = await connect ( config ); // Reuses scope
// Different config = different scope
const client3 = await connect ({ ... config }); // New scope
FrontMcpInstance Server bootstrap
Testing Overview Testing guide
Runtime Modes Deployment modes
Skills Skills documentation