
08 Requests and Responses

Fetching Response

OpenAI

# Non-streaming chat completion via the OpenAI SDK.
response = openai.chat.completions.create(
    messages=messages,
    model='gpt-4o-mini',
)
# The reply text sits on the first (and only) choice's message.
response_content = response.choices[0].message.content

Ollama

# Single-shot (non-streaming) chat call against a local Ollama server.
response = ollama.chat(model='llama3.2', messages=messages)
# Ollama returns a plain dict; the assistant text is nested under 'message'.
response_content = response['message']['content']

Claude

# Non-streaming call to Anthropic's Messages API.
response = claude.messages.create(
    model='claude-3-5-sonnet-latest',
    max_tokens=200,
    # Sampling temperature: higher values give more varied/creative output.
    temperature=0.7,
    system=system_prompt,
    messages=[{'role': 'user', 'content': user_prompt}],
)
# Anthropic returns a list of content blocks; take the text of the first.
response_content = response.content[0].text

Gemini

# Non-streaming request with the Google Generative AI SDK.
# Bug fix: the model object was assigned to `request` but then called as
# `gemini`, which raises NameError; use one consistent name. Streaming
# (`stream=True`) belongs in the "Streaming Response" section below, so
# this fetch example returns the whole reply at once.
gemini = google.generativeai.GenerativeModel(
    model_name='gemini-1.5-flash',
    system_instruction=system_prompt,
)
response = gemini.generate_content(user_prompt)
response_content = response.text
print(response_content)

Streaming Response

OpenAI

# Streaming chat completion via the OpenAI SDK: print tokens as they
# arrive and collect the full reply in `response_content`.
response_content = ''  # Bug fix: accumulator must exist before `+=` below.
response = openai.chat.completions.create(
    model='gpt-4o-mini',
    messages=messages,
    stream=True,
)
for chunk in response:
    # `delta.content` can be None (e.g. role-only deltas) — coerce to ''.
    content = chunk.choices[0].delta.content or ''
    print(content, end='', flush=True)
    response_content += content

Ollama

# Streaming chat against a local Ollama server: print each fragment as it
# arrives and collect the full reply in `response_content`.
response_content = ''  # Bug fix: accumulator must exist before `+=` below.
response = ollama.chat(model='llama3.2', messages=messages, stream=True)
for chunk in response:
    # Each chunk is a dict; missing keys fall back to the empty string.
    content = chunk.get("message", {}).get("content", "")
    print(content, end='', flush=True)
    response_content += content

Claude

# Streaming call to Anthropic's Messages API.
# Bug fix: `messages.create(...)` returns a plain Message object, which is
# not a context manager, so `with response as stream:` fails. Streaming
# requires `messages.stream(...)`, whose return value IS a context manager
# exposing `text_stream`.
response_content = ''
with claude.messages.stream(
    model='claude-3-5-sonnet-latest',
    max_tokens=200,
    temperature=0.7,  # Controls creativity. High value = High creativity
    system=system_prompt,
    messages=[
        {'role': 'user', 'content': user_prompt}
    ],
) as stream:
    for text in stream.text_stream:
        print(text, end='', flush=True)
        response_content += text

Gemini

# Streaming request with the Google Generative AI SDK.
# Bug fixes: (1) the model object was assigned to `request` but called as
# `gemini` (NameError); (2) `stream=True` was missing, so this "streaming"
# example actually returned the whole response at once.
gemini = google.generativeai.GenerativeModel(
    model_name='gemini-1.5-flash',
    system_instruction=system_prompt,
)
response = gemini.generate_content(user_prompt, stream=True)
for chunk in response:
    print(chunk.text, end="", flush=True)

References