from chatformers.chatbot import Chatbot
import os
from openai import OpenAI
# Groq API key: exported to the environment and also kept in a variable for the client below
os.environ["GROQ_API_KEY"] = "<API_KEY>"
GROQ_API_KEY = "<API_KEY>"
groq_base_url = "https://api.groq.com/openai/v1"
# Unique ID for conversation between Sam (User) and Julia (Chatbot)
user_id = "Sam-Julia"
# Name of the model you want to use
model_name = "llama-3.1-8b-instant"
# Initialize an OpenAI-compatible client pointed at Groq's endpoint; this client handles the actual chat completions
client = OpenAI(base_url=groq_base_url,
                api_key=GROQ_API_KEY,
                )
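# Optional sanity check (not part of the original example): a commented-out call using the
# standard OpenAI chat-completions API to confirm the Groq endpoint and key work
# resp = client.chat.completions.create(model=model_name,
#                                       messages=[{"role": "user", "content": "Say hello"}])
# print(resp.choices[0].message.content)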
# Optionally give your chatbot a character: a dictionary with key-value pairs of your choice
# that will be integrated into the system prompt, or an empty dictionary to skip it
character_data = {"name": "Julia",
                  "description": "You are on an online chatting website, chatting with strangers."}
# Configuration: see https://docs.mem0.ai/overview, since chatformers uses mem0 for memory and LLM management
# Example: https://docs.mem0.ai/examples/mem0-with-ollama
# This configuration is used to embed the chats and handle memory creation automatically
config = {
"vector_store": {
"provider": "chroma",
"config": {
"collection_name": "test",
"path": "db",
}
},
"embedder": {
"provider": "ollama",
"config": {
"model": "nomic-embed-text:latest"
}
},
"llm": {
"provider": "groq",
"config": {
"model": model_name,
"temperature": 0.1,
"max_tokens": 4000,
}
},
# "llm": {
# "provider": "ollama", # "config": { # "model": model_name, # "temperature": 0.1, # "max_tokens": 4000, # } # },}
# Initialize Chatbot with LLM client, model name, character data, and configuration
chatbot = Chatbot(llm_client=client, model_name=model_name, character_data=character_data, config=config)
# Optional: to add memories into the vector database at any point, uncomment these lines
# memory_messages = [
# {"role": "user", "content": "My name is Sam, what about you?"},
# {"role": "assistant", "content": "Hello Sam! I'm Julia."}
# ]
# chatbot.add_memories(memory_messages, user_id=user_id)
# query is the current question you want the LLM to answer
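# A minimal sketch of the follow-up call (an assumption based on chatformers' examples;
# verify the chat() signature against the installed version before uncommenting)
# query = "Do you know my name?"
# response = chatbot.chat(query=query, user_id=user_id, print_stream=True)
# print("Assistant:", response)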