Please wait while we prepare your experience
Get up and running with HelpingAI in minutes. This guide will walk you through making your first API call using different methods and programming languages.
First, you'll need an API key. Sign up at helpingai.co and get your API key from the dashboard.
Keep your API key secure! Never expose it in client-side code or public repositories.
All API requests should be made to:
https://api.helpingai.co/v1
Let's make a simple chat completion request. HelpingAI uses the same format as OpenAI's API, making it easy to integrate.
import requests

# Chat-completions endpoint (OpenAI-compatible wire format).
url = "https://api.helpingai.co/v1/chat/completions"

headers = {
    "Authorization": "Bearer YOUR_API_KEY",  # Replace with your key; never commit it.
    "Content-Type": "application/json",
}

data = {
    "model": "Dhanishtha-2.0-preview",
    "messages": [
        {"role": "user", "content": "Hello! I'm excited to try HelpingAI's reasoning capabilities."}
    ],
    "temperature": 0.7,
    "max_tokens": 150,
}

response = requests.post(url, headers=headers, json=data)
# Fail fast with a clear HTTPError on a non-2xx status; otherwise the
# indexing below raises a confusing KeyError on an error payload.
response.raise_for_status()

result = response.json()
print(result["choices"][0]["message"]["content"])
from openai import OpenAI

# HelpingAI speaks the OpenAI wire format, so the official SDK works
# unchanged — just point it at the HelpingAI base URL.
helpingai = OpenAI(
    base_url="https://api.helpingai.co/v1",
    api_key="YOUR_API_KEY",
)

completion = helpingai.chat.completions.create(
    model="Dhanishtha-2.0-preview",
    messages=[
        {"role": "user", "content": "Hello! I'm excited to try HelpingAI's reasoning capabilities."}
    ],
    max_tokens=150,
    temperature=0.7,
)

# The assistant's reply lives on the first choice.
print(completion.choices[0].message.content)
from helpingai import HelpingAI

# The native SDK mirrors the OpenAI client surface: chat.completions.create.
hai = HelpingAI(api_key="YOUR_API_KEY")

completion = hai.chat.completions.create(
    model="Dhanishtha-2.0-preview",
    messages=[
        {"role": "user", "content": "Hello! I'm excited to try HelpingAI's reasoning capabilities."}
    ],
    max_tokens=150,
    temperature=0.7,
)

# First choice carries the assistant's message.
print(completion.choices[0].message.content)
const axios = require("axios");

(async () => {
  try {
    // POST a chat-completion request; second object is the request body,
    // third is the axios config carrying auth headers.
    const response = await axios.post(
      "https://api.helpingai.co/v1/chat/completions",
      {
        model: "Dhanishtha-2.0-preview",
        messages: [
          {
            role: "user",
            content:
              "Hello! I'm excited to try HelpingAI's reasoning capabilities.",
          },
        ],
        temperature: 0.7,
        max_tokens: 150,
      },
      {
        headers: {
          Authorization: "Bearer YOUR_API_KEY",
          "Content-Type": "application/json",
        },
      }
    );
    console.log(response.data.choices[0].message.content);
  } catch (err) {
    // Without this, any API/network failure becomes an unhandled
    // promise rejection. Prefer the API's error body when present.
    console.error(err.response?.data ?? err.message);
    process.exitCode = 1;
  }
})();
import OpenAI from "openai";

// HelpingAI is OpenAI-compatible: the official SDK only needs a baseURL.
const openai = new OpenAI({
  baseURL: "https://api.helpingai.co/v1",
  apiKey: "YOUR_API_KEY",
});

async function main() {
  const completion = await openai.chat.completions.create({
    model: "Dhanishtha-2.0-preview",
    messages: [
      {
        role: "user",
        content:
          "Hello! I'm excited to try HelpingAI's reasoning capabilities.",
      },
    ],
    temperature: 0.7,
    max_tokens: 150,
  });
  console.log(completion.choices[0].message.content);
}

// A bare main() leaves a floating promise; handle rejections explicitly
// so API/network failures surface instead of an unhandled rejection.
main().catch((err) => {
  console.error(err);
  process.exitCode = 1;
});
import { HelpingAI } from "helpingai";

// Native SDK mirrors the OpenAI client surface (chat.completions.create).
const client = new HelpingAI({
  apiKey: "YOUR_API_KEY",
});

async function main() {
  const completion = await client.chat.completions.create({
    model: "Dhanishtha-2.0-preview",
    messages: [
      {
        role: "user",
        content:
          "Hello! I'm excited to try HelpingAI's reasoning capabilities.",
      },
    ],
    temperature: 0.7,
    max_tokens: 150,
  });
  console.log(completion.choices[0].message.content);
}

// Attach a rejection handler — a bare main() call would turn any API
// failure into an unhandled promise rejection.
main().catch((err) => {
  console.error(err);
  process.exitCode = 1;
});
A typical response looks like this:
{
"id": "chatcmpl-abc123",
"object": "chat.completion",
"created": 1677652288,
"model": "Dhanishtha-2.0-preview",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "Hello! I'm excited to help you explore what HelpingAI can do with its advanced reasoning capabilities. I'm here to make sure you have a great experience. What would you like to try first?"
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 20,
"completion_tokens": 45,
"total_tokens": 65
}
}
Notice how HelpingAI responds with information about its reasoning capabilities - ready to demonstrate its advanced mid-response reasoning!
Key request parameters:

- `model` — the model to use, e.g. `"Dhanishtha-2.0-preview"`
- `messages` — a list of message objects, each with a `role` and `content`
- `stream` — set to `true` for streaming responses (defaults to `false`)
- `hideThink` — set to `false` to see the AI's reasoning process

HelpingAI can show you its reasoning process:
# Ask for a calculation so the model's step-by-step reasoning is visible.
# Reuses the `client` constructed in the earlier quickstart snippet.
# NOTE(review): `hideThink` looks like a HelpingAI-specific extension to the
# OpenAI-compatible request — confirm the exact parameter name in the SDK docs.
response = client.chat.completions.create(
model="Dhanishtha-2.0-preview",
messages=[
{"role": "user", "content": "What's 15 * 24?"}
],
hideThink=False # Shows reasoning in <think> tags
)
Get responses as they're generated:
# Request a streamed completion: the response arrives as incremental
# delta chunks instead of one final message. Reuses the earlier `client`.
stream = client.chat.completions.create(
    model="Dhanishtha-2.0-preview",
    messages=[
        {"role": "user", "content": "Tell me a short story"}
    ],
    stream=True,
)

# Print each text delta as it arrives; chunks without content are skipped.
for chunk in stream:
    delta_text = chunk.choices[0].delta.content
    if delta_text is not None:
        print(delta_text, end="")
Now that you've made your first API call, explore more advanced features:
Welcome to the future of intelligent reasoning AI! 🚀