The LM Studio TypeScript SDK (@lmstudio/sdk) provides a powerful interface for interacting with local LLMs through LM Studio. It’s MIT licensed and supports both Node.js and browser environments.
License: MIT
Repository: lmstudio-ai/lmstudio-js
Stars: 1.5k+
NPM: @lmstudio/sdk
# Install the SDK with your preferred package manager:
npm install @lmstudio/sdk
# or
yarn add @lmstudio/sdk
# or
pnpm add @lmstudio/sdk
<!-- Browser usage: load the SDK as an ES module straight from the esm.sh CDN -->
<script type="module">
import { LMStudio } from 'https://esm.sh/@lmstudio/sdk';
</script>
// Quick start: connect to the local LM Studio instance, grab the
// currently loaded LLM, and run a one-shot text completion.
import { LMStudio } from "@lmstudio/sdk";

const client = new LMStudio();
const llm = await client.llm();
const completion = await llm.complete("Once upon a time,");
console.log(completion);
// Minimal chat session: create a chat with a system prompt, log every
// message as it arrives, send one user turn, and await the reply.
import { LMStudio } from "@lmstudio/sdk";

const client = new LMStudio();
const llm = await client.llm();
const session = llm.createChat("You are a helpful shopkeeper");

session.on("message", (msg) => {
  console.log(`${msg.role}: ${msg.content}`);
});

session.user("Hello! What do you sell?");
await session.respond();
// Streaming completion: print each chunk as soon as it arrives
// instead of waiting for the full response.
import { LMStudio } from "@lmstudio/sdk";

const client = new LMStudio();
const llm = await client.llm();

for await (const piece of llm.completeStream("Write a haiku about coding:")) {
  process.stdout.write(piece);
}
// Client creation and model discovery.
import { LMStudio } from "@lmstudio/sdk";

// Create client (auto-connects to local LM Studio)
const lms = new LMStudio();

// Create client with custom URL — distinct name: redeclaring
// `const lms` in the same scope is a SyntaxError as originally written.
const customLms = new LMStudio({
  baseUrl: "http://localhost:1234"
});

// List loaded models
const models = await lms.listLoadedModels();

// List downloaded models
const downloaded = await lms.listDownloadedModels();
// Model lifecycle: load, load with options, unload.
import { LMStudio } from "@lmstudio/sdk";

const lms = new LMStudio();

// Load model with default settings
const model = await lms.llm("llama-3-8b-instruct");

// Load with custom parameters — distinct name: a second `const model`
// in the same scope would not compile.
const tunedModel = await lms.llm("llama-3-8b-instruct", {
  contextLength: 8192, // context window size in tokens
  gpuOffload: "max"    // offload as much as possible to the GPU
});

// Unload a specific model
await lms.unload("llama-3-8b-instruct");

// Unload all models
await lms.unloadAll();
// Text completion: basic, parameterized, and streaming forms.
import { LMStudio } from "@lmstudio/sdk";

const lms = new LMStudio();
const model = await lms.llm();

// Basic completion
const response = await model.complete("The future of AI is");

// With sampling parameters — distinct name: redeclaring
// `const response` in the same scope is a SyntaxError.
const storyResponse = await model.complete("Write a story about", {
  maxTokens: 500,   // cap on generated tokens
  temperature: 0.7, // higher = more random sampling
  topP: 0.9         // nucleus sampling cutoff
});

// Streaming: consume chunks as they are generated
const stream = model.completeStream("Write a poem:");
for await (const chunk of stream) {
  process.stdout.write(chunk);
}
// Multi-turn chat: seed the history with user/assistant turns,
// then ask the model to generate the next assistant message.
import { LMStudio } from "@lmstudio/sdk";

const client = new LMStudio();
const llm = await client.llm();

// Create a chat with a system prompt
const conversation = llm.createChat("You are a helpful assistant");

// Log each message as it is added to the conversation
conversation.on("message", (msg) => {
  console.log(`${msg.role}: ${msg.content}`);
});

// Pre-populate the conversation history
conversation.user("Hello!");
conversation.assistant("Hi! How can I help?");
conversation.user("Tell me a joke");

// Generate the assistant's reply
await conversation.respond();
// Function calling: register tools the model may invoke during a chat.
import { LMStudio } from "@lmstudio/sdk";
const lms = new LMStudio();
// Define functions
const tools = [
{
name: "get_weather",
description: "Get weather for a location",
// JSON-Schema-style parameter declaration the model uses to form calls
parameters: {
type: "object",
properties: {
location: { type: "string" }
},
required: ["location"]
},
// Invoked by the SDK when the model requests this tool
execute: async ({ location }: { location: string }) => {
return `Weather in ${location}: Sunny, 25°C`;
}
},
{
name: "calculate",
description: "Calculate mathematical expression",
parameters: {
type: "object",
properties: {
expression: { type: "string" }
},
required: ["expression"]
},
execute: async ({ expression }: { expression: string }) => {
// SECURITY: eval() executes arbitrary JavaScript supplied by the model.
// Do not ship this as-is — use a dedicated math-expression parser instead.
return String(eval(expression));
}
}
];
// Create model with tools
const model = await lms.llm({ tools });
// Chat with function calling
const chat = model.createChat();
chat.user("What's the weather in Paris?");
await chat.respond();
// Embeddings: single-text and batch embedding generation.
import { LMStudio } from "@lmstudio/sdk";

const client = new LMStudio();

// Load the embedding model
const embedder = await client.embedding();

// Embed one string
const vector = await embedder.embed("Hello, world!");
console.log(`Embedding length: ${vector.length}`);

// Embed several strings in a single call
const vectors = await embedder.embedBatch([
  "First text",
  "Second text",
  "Third text"
]);
// Vision: attach an image to a chat turn and ask the model about it.
import { LMStudio } from "@lmstudio/sdk";

const client = new LMStudio();
const llm = await client.llm();

// Read the image from disk
const picture = await client.loadImage("./image.jpg");

// Ask about the attached image
const session = llm.createChat();
session.user("What's in this image?", { attachments: [picture] });
await session.respond();
// Client configuration: point at a specific server and tune resilience.
import { LMStudio } from "@lmstudio/sdk";
// Custom client with retry logic
const lms = new LMStudio({
baseUrl: "http://localhost:1234", // LM Studio server address
timeout: 30000,                   // request timeout in milliseconds
maxRetries: 3                     // retry failed requests up to 3 times
});
// Model metadata: inspect the loaded model's id, context window, and size.
import { LMStudio } from "@lmstudio/sdk";

const client = new LMStudio();
const llm = await client.llm("llama-3-8b-instruct");
const details = await llm.info();

console.log(`Model: ${details.id}`);
console.log(`Context length: ${details.contextLength}`);
console.log(`Parameters: ${details.parameterCount}`);
// Concurrency: fire several completions at once and await them together.
import { LMStudio } from "@lmstudio/sdk";

const client = new LMStudio();
const llm = await client.llm();

const prompts = [
  "Story about cats",
  "Story about dogs",
  "Story about birds"
];

// Run all prompts in parallel rather than sequentially
const results = await Promise.all(prompts.map((p) => llm.complete(p)));
for (const result of results) {
  console.log(result);
}
// Error handling: distinguish SDK errors from everything else.
import { LMStudio, LMStudioError } from "@lmstudio/sdk";

try {
  const client = new LMStudio();
  // Requesting a model that is not installed should reject.
  const model = await client.llm("nonexistent-model");
} catch (error) {
  if (error instanceof LMStudioError) {
    console.error(`LM Studio error: ${error.message}`);
  } else {
    console.error(`Unknown error: ${error}`);
  }
}
// Events: subscribe to connection- and model-lifecycle notifications.
import { LMStudio } from "@lmstudio/sdk";
const lms = new LMStudio();
// Connection events
lms.on("connected", () => console.log("Connected to LM Studio"));
lms.on("disconnected", () => console.log("Disconnected from LM Studio"));
lms.on("error", (error) => console.error("Connection error:", error));
// Model events
const model = await lms.llm();
model.on("load", () => console.log("Model loaded"));
model.on("unload", () => console.log("Model unloaded"));
// Express integration: expose completion and chat endpoints over HTTP.
import express from "express";
import { LMStudio } from "@lmstudio/sdk";

const app = express();
app.use(express.json());

// One shared client; models are obtained per request.
const lms = new LMStudio();

// POST /complete { prompt, maxTokens? } -> { response }
app.post("/complete", async (req, res) => {
  try {
    const { prompt, maxTokens = 500 } = req.body;
    if (typeof prompt !== "string" || prompt.length === 0) {
      return res.status(400).json({ error: "prompt is required" });
    }
    const model = await lms.llm();
    const response = await model.complete(prompt, { maxTokens });
    res.json({ response });
  } catch (error) {
    // Express 4 does not catch async errors; without this the
    // request would hang and the process could crash on rejection.
    res.status(500).json({ error: String(error) });
  }
});

// POST /chat { message } -> { response }
app.post("/chat", async (req, res) => {
  try {
    const { message } = req.body;
    if (typeof message !== "string" || message.length === 0) {
      return res.status(400).json({ error: "message is required" });
    }
    const model = await lms.llm();
    const chat = model.createChat("You are a helpful assistant");
    chat.user(message);
    const response = await chat.respond();
    res.json({ response });
  } catch (error) {
    res.status(500).json({ error: String(error) });
  }
});

app.listen(3000, () => {
  console.log("Server running on port 3000");
});
// React chat component: send a user message and append the assistant reply.
import React, { useState } from "react";
import { LMStudio } from "@lmstudio/sdk";

function ChatComponent() {
  const [messages, setMessages] = useState<{role: string, content: string}[]>([]);
  const [input, setInput] = useState("");
  const [loading, setLoading] = useState(false);

  const sendMessage = async () => {
    setLoading(true);
    try {
      // NOTE(review): a new client/chat is created per message, so the
      // model never sees prior turns — fine for a demo, but hoist these
      // out of the handler for a real multi-turn conversation.
      const lms = new LMStudio();
      const model = await lms.llm();
      const chat = model.createChat("You are a helpful assistant");
      // Functional update avoids stale-closure state when sends overlap.
      setMessages(prev => [...prev, { role: "user", content: input }]);
      chat.on("message", (message) => {
        if (message.role === "assistant") {
          setMessages(prev => [...prev, { role: "assistant", content: message.content }]);
        }
      });
      chat.user(input);
      await chat.respond();
      setInput("");
    } finally {
      // Reset even if the request throws, so the button never stays stuck.
      setLoading(false);
    }
  };

  return (
    <div>
      {messages.map((msg, i) => (
        <div key={i}>{msg.role}: {msg.content}</div>
      ))}
      <input value={input} onChange={e => setInput(e.target.value)} />
      <button onClick={sendMessage} disabled={loading}>Send</button>
    </div>
  );
}
// app/api/chat/route.ts
// Next.js App Router API route: forward one chat message to LM Studio.
import { NextRequest, NextResponse } from "next/server";
import { LMStudio } from "@lmstudio/sdk";

export async function POST(request: NextRequest) {
  try {
    const { message } = await request.json();
    if (typeof message !== "string" || message.length === 0) {
      return NextResponse.json({ error: "message is required" }, { status: 400 });
    }
    const lms = new LMStudio();
    const model = await lms.llm();
    const chat = model.createChat("You are a helpful assistant");
    chat.user(message);
    const response = await chat.respond();
    return NextResponse.json({ response });
  } catch (error) {
    // Malformed JSON or an unreachable LM Studio instance would
    // otherwise surface as an unhandled 500 with no response body.
    return NextResponse.json({ error: String(error) }, { status: 500 });
  }
}
The SDK uses LM Studio’s configuration:
The configuration directory is platform-specific: `%APPDATA%/LM Studio` on Windows, `~/Library/Application Support/LM Studio` on macOS, and `~/.config/LM Studio` on Linux.
# Custom API endpoint
# Environment variables read by the client configuration below.
export LMSTUDIO_API_BASE=http://localhost:1234
# Authentication token
export LMSTUDIO_API_KEY=your-token
// Use in code: pass endpoint and token from the environment
const lms = new LMStudio({
baseUrl: process.env.LMSTUDIO_API_BASE, // string | undefined at runtime
apiKey: process.env.LMSTUDIO_API_KEY
});
// Troubleshooting checks.

// Ensure LM Studio is running before issuing model requests.
const lms = new LMStudio();
try {
  await lms.listLoadedModels();
  console.log("Connected!");
} catch (error) {
  console.error("Connection error:", error);
}

// List available models — reuse the existing client; redeclaring
// `const lms` in the same scope is a SyntaxError as originally written.
const models = await lms.listDownloadedModels();
console.log(models);

// Increase timeout for slow startups or large model loads.
const patientLms = new LMStudio({ timeout: 60000 });
Any questions?
Feel free to contact us. Find all contact information on our contact page.