HelpingAI JavaScript SDK
The HelpingAI JavaScript SDK provides a convenient way to interact with the HelpingAI API from JavaScript and Node.js applications. It includes full TypeScript support and all the features you need to build emotionally intelligent applications.
Install the HelpingAI JavaScript SDK using npm:
npm install helpingai
Or using yarn:
yarn add helpingai
import { HelpingAI } from 'helpingai';

// Create a client with an explicit API key.
const client = new HelpingAI({ apiKey: 'your-api-key-here' });

// Send a first chat request and print the model's reply.
async function main() {
  const response = await client.chat.completions.create({
    model: 'Dhanishtha-2.0-preview',
    messages: [
      { role: 'user', content: "Hello! I'm excited to try HelpingAI." },
    ],
  });
  console.log(response.choices[0].message.content);
}

main();
You can also set your API key as an environment variable:
export HELPINGAI_API_KEY="your-api-key-here"
import { HelpingAI } from 'helpingai';
// Client will automatically use HELPINGAI_API_KEY
// (no explicit apiKey argument needed when the env var is set).
const client = new HelpingAI();
// Top-level await: this snippet must run in an ES module context.
const response = await client.chat.completions.create({
model: 'Dhanishtha-2.0-preview',
messages: [
{role: 'user', content: 'Hello!'}
]
});
<!DOCTYPE html>
<html>
<head>
<title>HelpingAI Browser Example</title>
</head>
<body>
<script type="module">
import { HelpingAI } from 'https://cdn.skypack.dev/helpingai';

const client = new HelpingAI({
  apiKey: 'your-api-key-here'
});

// Request a completion and render it into the page.
async function chat() {
  const response = await client.chat.completions.create({
    model: 'Dhanishtha-2.0-preview',
    messages: [
      {role: 'user', content: 'Hello from the browser!'}
    ]
  });
  // Use textContent, not innerHTML: model output is untrusted text and
  // assigning it to innerHTML would allow HTML/script injection (XSS).
  document.body.textContent = response.choices[0].message.content;
}

chat();
</script>
</body>
</html>
Generate conversational responses with emotional intelligence:
import { HelpingAI } from 'helpingai';

const client = new HelpingAI({ apiKey: 'your-api-key' });

// Basic (non-streaming) chat completion with usage reporting.
async function basicChat() {
  const request = {
    model: 'Dhanishtha-2.0-preview',
    messages: [
      { role: 'system', content: 'You are a helpful assistant.' },
      { role: 'user', content: "I'm feeling overwhelmed with work today." },
    ],
    temperature: 0.7,
    max_tokens: 200,
  };
  const response = await client.chat.completions.create(request);
  const [firstChoice] = response.choices;
  console.log(firstChoice.message.content);
  console.log(`Tokens used: ${response.usage.total_tokens}`);
}

basicChat();
Get real-time responses as they're generated:
import { HelpingAI } from 'helpingai';

const client = new HelpingAI({ apiKey: 'your-api-key' });

// Streaming example: the response arrives as an async iterator of chunks.
async function streamingChat() {
  const stream = await client.chat.completions.create({
    model: 'Dhanishtha-2.0-preview',
    messages: [{ role: 'user', content: 'Tell me a story about courage' }],
    stream: true,
    temperature: 0.8,
    max_tokens: 500,
  });
  process.stdout.write('AI Response: ');
  for await (const chunk of stream) {
    const delta = chunk.choices[0]?.delta?.content;
    if (delta) {
      process.stdout.write(delta);
    }
  }
  console.log(); // New line
}

streamingChat();
See how the AI thinks with the `hideThink` parameter:
import { HelpingAI } from 'helpingai';

const client = new HelpingAI({ apiKey: 'your-api-key' });

// Ask the model to expose its step-by-step reasoning in the reply.
async function showReasoning() {
  const params = {
    model: 'Dhanishtha-2.0-preview',
    messages: [{ role: 'user', content: "What's 15 * 24? Show your work." }],
    hideThink: false, // Show reasoning process
    temperature: 0.3,
    max_tokens: 400,
  };
  const response = await client.chat.completions.create(params);
  console.log(response.choices[0].message.content);
}

showReasoning();
Execute functions during conversations:
import { HelpingAI } from 'helpingai';
// Shared client used by the tool-calling example below.
const client = new HelpingAI({
apiKey: 'your-api-key'
});
/**
 * Mock weather lookup used by the tool-calling demo.
 * @param {string} location - City name to report on.
 * @returns {string} Human-readable weather description.
 */
function getWeather(location) {
  return `The weather in ${location} is sunny and 72°F`;
}
// Demonstrates the tool-calling round trip:
// 1) the model requests a tool, 2) we execute it locally,
// 3) the results go back to the model for a final answer.
async function toolCalling() {
  // JSON-schema description of the single callable tool.
  const tools = [{
    type: "function",
    function: {
      name: "get_weather",
      description: "Get current weather for a location",
      parameters: {
        type: "object",
        properties: {
          location: {type: "string", description: "City name"}
        },
        required: ["location"]
      }
    }
  }];
  let messages = [
    {role: 'user', content: "What's the weather like in Tokyo?"}
  ];
  const response = await client.chat.completions.create({
    model: 'Dhanishtha-2.0-preview',
    messages: messages,
    tools: tools,
    tool_choice: "auto"
  });
  // Handle tool calls requested by the model.
  const message = response.choices[0].message;
  if (message.tool_calls) {
    // The assistant message carrying tool_calls must precede the tool results.
    messages.push(message);
    for (const toolCall of message.tool_calls) {
      const functionName = toolCall.function.name;
      const functionArgs = JSON.parse(toolCall.function.arguments);
      let result;
      if (functionName === "get_weather") {
        result = getWeather(functionArgs.location);
      } else {
        // Never push undefined content: report unrecognized tools explicitly
        // so the model can recover instead of the request failing.
        result = `Error: unknown function ${functionName}`;
      }
      messages.push({
        role: "tool",
        tool_call_id: toolCall.id,
        content: result
      });
    }
    // Get final response incorporating the tool output.
    const finalResponse = await client.chat.completions.create({
      model: 'Dhanishtha-2.0-preview',
      messages: messages
    });
    console.log(finalResponse.choices[0].message.content);
  }
}
toolCalling();
List and retrieve information about available models:
import { HelpingAI } from 'helpingai';

const client = new HelpingAI({ apiKey: 'your-api-key' });

// Enumerate available models, then fetch a single model by id.
async function listModels() {
  // List all models
  const models = await client.models.list();
  for (const { id, created, owned_by } of models.data) {
    console.log(`Model: ${id}`);
    console.log(`Created: ${created}`);
    console.log(`Owned by: ${owned_by}`);
    console.log('---');
  }
  // Get specific model info
  const model = await client.models.retrieve('Dhanishtha-2.0-preview');
  console.log(`Model ID: ${model.id}`);
  console.log(`Created: ${model.created}`);
}

listModels();
Configure the client with custom settings:
import { HelpingAI } from 'helpingai';
// All options besides apiKey are optional; values shown are examples.
const client = new HelpingAI({
apiKey: 'your-api-key',
baseURL: 'https://api.helpingai.co/v1', // Custom base URL (e.g. a proxy or regional endpoint)
timeout: 30000, // Request timeout in milliseconds
maxRetries: 3, // Number of retries on failure
// Extra headers merged into every request.
defaultHeaders: {
'User-Agent': 'MyApp/1.0'
}
});
Handle API errors gracefully:
import { HelpingAI } from 'helpingai';

const client = new HelpingAI({ apiKey: 'your-api-key' });

// Map common HTTP status codes from the API to actionable messages.
async function handleErrors() {
  try {
    const response = await client.chat.completions.create({
      model: 'Dhanishtha-2.0-preview',
      messages: [{ role: 'user', content: 'Hello!' }],
    });
    console.log(response.choices[0].message.content);
  } catch (error) {
    const { status } = error;
    if (status === 401) {
      console.error('Invalid API key');
      return;
    }
    if (status === 429) {
      console.error('Rate limit exceeded');
      return;
    }
    if (status >= 500) {
      console.error('Server error');
      return;
    }
    console.error('API error:', error.message);
  }
}

handleErrors();
import { HelpingAI } from 'helpingai';

const client = new HelpingAI({ apiKey: 'your-api-key' });

// Stream tokens into a DOM element as they arrive.
async function browserStreaming() {
  const responseDiv = document.getElementById('response');
  const stream = await client.chat.completions.create({
    model: 'Dhanishtha-2.0-preview',
    messages: [{ role: 'user', content: 'Write a poem about the ocean' }],
    stream: true,
    temperature: 0.8,
  });
  let fullResponse = '';
  for await (const chunk of stream) {
    const piece = chunk.choices[0]?.delta?.content;
    if (piece) {
      fullResponse += piece;
      responseDiv.textContent = fullResponse; // textContent avoids HTML injection
    }
  }
}
import React, { useState } from 'react';
import { HelpingAI } from 'helpingai';

// NOTE(review): bundling an API key into client-side code exposes it to
// every visitor — proxy requests through your backend in production.
const client = new HelpingAI({
  apiKey: process.env.REACT_APP_HELPINGAI_API_KEY
});

// Minimal chat UI: keeps the running transcript in state and appends the
// assistant's reply after each send.
function ChatComponent() {
  const [messages, setMessages] = useState([]);
  const [input, setInput] = useState('');
  const [loading, setLoading] = useState(false);

  const sendMessage = async () => {
    if (!input.trim()) return;
    const userMessage = { role: 'user', content: input };
    const newMessages = [...messages, userMessage];
    setMessages(newMessages);
    setInput('');
    setLoading(true);
    try {
      const response = await client.chat.completions.create({
        model: 'Dhanishtha-2.0-preview',
        messages: newMessages
      });
      const assistantMessage = {
        role: 'assistant',
        content: response.choices[0].message.content
      };
      setMessages([...newMessages, assistantMessage]);
    } catch (error) {
      console.error('Error:', error);
    } finally {
      setLoading(false);
    }
  };

  return (
    <div>
      <div>
        {messages.map((msg, index) => (
          <div key={index}>
            <strong>{msg.role}:</strong> {msg.content}
          </div>
        ))}
      </div>
      {/* onKeyPress is deprecated in React/DOM; onKeyDown reliably fires for Enter. */}
      <input
        value={input}
        onChange={(e) => setInput(e.target.value)}
        onKeyDown={(e) => e.key === 'Enter' && sendMessage()}
        disabled={loading}
      />
      <button onClick={sendMessage} disabled={loading}>
        {loading ? 'Sending...' : 'Send'}
      </button>
    </div>
  );
}

export default ChatComponent;
The SDK includes full TypeScript support:
import { HelpingAI, ChatCompletion, ChatCompletionMessage } from 'helpingai';

const client = new HelpingAI({ apiKey: 'your-api-key' });

// Fully-typed request/response round trip.
async function typedChat(): Promise<string> {
  const response: ChatCompletion = await client.chat.completions.create({
    model: 'Dhanishtha-2.0-preview',
    messages: [{ role: 'user', content: 'Hello!' }],
  });
  const message: ChatCompletionMessage = response.choices[0].message;
  return message.content;
}
// Shape of a complete (non-streaming) chat completion response.
interface ChatCompletion {
id: string;
object: string;
created: number; // creation timestamp — presumably Unix seconds; TODO confirm units
model: string;
choices: Choice[];
usage: Usage;
}
// One candidate completion within the response.
interface Choice {
index: number;
message: ChatCompletionMessage;
finish_reason: string;
}
// A single conversation turn (request or response message).
interface ChatCompletionMessage {
role: 'system' | 'user' | 'assistant' | 'tool';
content: string;
tool_calls?: ToolCall[]; // Present only when the assistant requests tool execution
}
// Token accounting for the request/response pair.
interface Usage {
prompt_tokens: number;
completion_tokens: number;
total_tokens: number;
}
import { HelpingAI } from 'helpingai';

// Create a single client instance and reuse it
const client = new HelpingAI({ apiKey: 'your-api-key' });

// Fire five independent requests concurrently and await them together.
async function multipleRequests() {
  const promises = Array.from({ length: 5 }, (_, i) =>
    client.chat.completions.create({
      model: 'Dhanishtha-2.0-preview',
      messages: [{ role: 'user', content: `Request ${i}` }],
    })
  );
  const responses = await Promise.all(promises);
  responses.forEach((response, index) => {
    console.log(`Response ${index}: ${response.choices[0].message.content}`);
  });
}
/**
 * Sliding-window rate limiter: permits at most `requestsPerMinute`
 * calls within any rolling 60-second window.
 */
class RateLimiter {
  constructor(requestsPerMinute = 60) {
    this.requests = []; // timestamps (ms) of calls inside the window
    this.limit = requestsPerMinute;
  }

  /**
   * Resolves immediately when under the limit; otherwise sleeps until the
   * oldest recorded call ages out of the window, then re-checks.
   */
  async throttle() {
    const now = Date.now();
    // Drop timestamps older than the 60-second window.
    this.requests = this.requests.filter((stamp) => now - stamp < 60000);
    if (this.requests.length < this.limit) {
      this.requests.push(now);
      return;
    }
    const waitMs = 60000 - (now - this.requests[0]);
    await new Promise((resolve) => setTimeout(resolve, waitMs));
    return this.throttle();
  }
}
// Module-wide limiter shared by all requests below.
const rateLimiter = new RateLimiter(60);

// Wait for a rate-limit slot, then issue the request.
async function rateLimitedRequest() {
  await rateLimiter.throttle();
  return client.chat.completions.create({
    model: 'Dhanishtha-2.0-preview',
    messages: [{ role: 'user', content: 'Hello!' }],
  });
}
/**
 * Rough token estimate using the ~4-characters-per-token heuristic.
 * @param {string} text - Input to measure.
 * @returns {number} Estimated token count (rounded up).
 */
function estimateTokens(text) {
  return Math.ceil(text.length / 4);
}
/**
 * Sends the most recent messages that fit within an estimated token budget.
 * Walks the history newest-to-oldest, keeping messages until the budget
 * would be exceeded, and preserves chronological order in the request.
 *
 * @param {{role: string, content: string}[]} messages - Full conversation history.
 * @param {number} [maxTokens=4000] - Approximate token budget.
 * @returns {Promise<object>} The chat completion response.
 */
async function managedChat(messages, maxTokens = 4000) {
  let totalTokens = 0;
  const managedMessages = [];
  // Iterate newest-first over a COPY: the original `messages.reverse()`
  // mutated the caller's array in place, silently reordering their history.
  for (const message of [...messages].reverse()) {
    const messageTokens = estimateTokens(message.content);
    if (totalTokens + messageTokens > maxTokens) break;
    managedMessages.unshift(message); // restore chronological order
    totalTokens += messageTokens;
  }
  const response = await client.chat.completions.create({
    model: 'Dhanishtha-2.0-preview',
    messages: managedMessages
  });
  return response;
}
If you're migrating from the OpenAI JavaScript library:
// Before (OpenAI)
import OpenAI from 'openai';
const openai = new OpenAI({ apiKey: 'openai-key' });
// After (HelpingAI)
import { HelpingAI } from 'helpingai';
const client = new HelpingAI({ apiKey: 'helpingai-key' });
// The API calls remain the same!
// (HelpingAI mirrors the OpenAI chat.completions interface.)
const response = await client.chat.completions.create({
model: 'Dhanishtha-2.0-preview', // Just change the model
messages: [{role: 'user', content: 'Hello!'}]
});
Find more examples in our GitHub repository: