Create a new Google Gemini llm_provider() instance
Source: R/llm_provider.R
Creates an llm_provider() object that interacts with the Google Gemini API.
Streaming is not yet supported in this implementation.
Usage
llm_provider_google_gemini(
  parameters = list(model = "gemini-1.5-flash"),
  verbose = getOption("tidyprompt.verbose", TRUE),
  url = "https://generativelanguage.googleapis.com/v1beta/models/",
  api_key = Sys.getenv("GOOGLE_AI_STUDIO_API_KEY")
)
Arguments
- parameters
A named list of parameters. Currently, the following parameter is required:
model: The name of the model to use (see: https://ai.google.dev/gemini-api/docs/models/gemini)
Additional parameters are appended to the request body. See the Google AI Studio API documentation for more information: https://ai.google.dev/gemini-api/docs/text-generation and https://github.com/google/generative-ai-docs/blob/main/site/en/gemini-api/docs/get-started/rest.ipynb
- verbose
A logical indicating whether the interaction with the LLM provider should be printed to the console
- url
The URL to the Google Gemini API endpoint for chat completion
- api_key
The API key to use for authentication with the Google Gemini API (see: https://aistudio.google.com/app/apikey)
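For example, a provider can be configured with a different model and an explicitly supplied API key. This is a minimal sketch; the model name below is only illustrative (see the model list linked above):
# Minimal sketch: construct a Gemini provider with an explicit model and API key
# (the model name is illustrative; any model from the Gemini model list may be used)
gemini <- llm_provider_google_gemini(
  parameters = list(model = "gemini-1.5-pro"),
  verbose = TRUE,
  api_key = Sys.getenv("GOOGLE_AI_STUDIO_API_KEY")
)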
Value
A new llm_provider() object for use of the Google Gemini API
See also
Other llm_provider:
llm_provider, llm_provider_groq(), llm_provider_mistral(), llm_provider_ollama(), llm_provider_openai(), llm_provider_openrouter(), llm_provider_xai()
Examples
# Various providers:
ollama <- llm_provider_ollama()
openai <- llm_provider_openai()
openrouter <- llm_provider_openrouter()
mistral <- llm_provider_mistral()
groq <- llm_provider_groq()
xai <- llm_provider_xai()
gemini <- llm_provider_google_gemini()
# Initialize with settings:
ollama <- llm_provider_ollama(
  parameters = list(
    model = "llama3.2:3b",
    stream = TRUE
  ),
  verbose = TRUE,
  url = "http://localhost:11434/api/chat"
)
# Change settings:
ollama$verbose <- FALSE
ollama$parameters$stream <- FALSE
ollama$parameters$model <- "llama3.1:8b"
if (FALSE) { # \dontrun{
# Try a simple chat message with '$complete_chat()':
response <- ollama$complete_chat("Hi!")
response
# $role
# [1] "assistant"
#
# $content
# [1] "How's it going? Is there something I can help you with or would you like
# to chat?"
#
# $http
# Response [http://localhost:11434/api/chat]
# Date: 2024-11-18 14:21
# Status: 200
# Content-Type: application/json; charset=utf-8
# Size: 375 B
# Use with send_prompt():
"Hi" |>
send_prompt(ollama)
# [1] "How's your day going so far? Is there something I can help you with or
# would you like to chat?"
} # }
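The Gemini provider created above can be used in the same way. The following is a sketch only; it requires a valid GOOGLE_AI_STUDIO_API_KEY and network access, and the model's reply will vary:
if (FALSE) { # \dontrun{
# Sketch: call the Gemini provider like any other llm_provider
# (requires a valid GOOGLE_AI_STUDIO_API_KEY; replies will vary)
gemini <- llm_provider_google_gemini()
response <- gemini$complete_chat("Hi!")
response$content

"Hi!" |>
  send_prompt(gemini)
} # }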