public class OpenAiStreamingChatModel extends Object implements dev.langchain4j.model.chat.StreamingChatLanguageModel, dev.langchain4j.model.chat.TokenCountEstimator

Represents an OpenAI model with a chat completion interface. The response is streamed token by token and delivered through a StreamingResponseHandler.
You can find a description of the parameters here.

Nested Class Summary

Modifier and Type | Class and Description
---|---
static class |
OpenAiStreamingChatModel.OpenAiStreamingChatModelBuilder |
Constructor Summary

Constructor and Description
---
OpenAiStreamingChatModel(String baseUrl,
String apiKey,
String organizationId,
String modelName,
Double temperature,
Double topP,
List<String> stop,
Integer maxTokens,
Double presencePenalty,
Double frequencyPenalty,
Map<String,Integer> logitBias,
String responseFormat,
Integer seed,
String user,
Duration timeout,
Proxy proxy,
Boolean logRequests,
Boolean logResponses,
dev.langchain4j.model.Tokenizer tokenizer) |
Method Summary

Modifier and Type | Method and Description
---|---
static OpenAiStreamingChatModel.OpenAiStreamingChatModelBuilder |
builder() |
int |
estimateTokenCount(List<dev.langchain4j.data.message.ChatMessage> messages) |
void |
generate(List<dev.langchain4j.data.message.ChatMessage> messages,
List<dev.langchain4j.agent.tool.ToolSpecification> toolSpecifications,
dev.langchain4j.model.StreamingResponseHandler<dev.langchain4j.data.message.AiMessage> handler) |
void |
generate(List<dev.langchain4j.data.message.ChatMessage> messages,
dev.langchain4j.model.StreamingResponseHandler<dev.langchain4j.data.message.AiMessage> handler) |
void |
generate(List<dev.langchain4j.data.message.ChatMessage> messages,
dev.langchain4j.agent.tool.ToolSpecification toolSpecification,
dev.langchain4j.model.StreamingResponseHandler<dev.langchain4j.data.message.AiMessage> handler) |
static OpenAiStreamingChatModel |
withApiKey(String apiKey) |
public OpenAiStreamingChatModel(String baseUrl, String apiKey, String organizationId, String modelName, Double temperature, Double topP, List<String> stop, Integer maxTokens, Double presencePenalty, Double frequencyPenalty, Map<String,Integer> logitBias, String responseFormat, Integer seed, String user, Duration timeout, Proxy proxy, Boolean logRequests, Boolean logResponses, dev.langchain4j.model.Tokenizer tokenizer)
public void generate(List<dev.langchain4j.data.message.ChatMessage> messages, dev.langchain4j.model.StreamingResponseHandler<dev.langchain4j.data.message.AiMessage> handler)
generate
in interface dev.langchain4j.model.chat.StreamingChatLanguageModel
public void generate(List<dev.langchain4j.data.message.ChatMessage> messages, List<dev.langchain4j.agent.tool.ToolSpecification> toolSpecifications, dev.langchain4j.model.StreamingResponseHandler<dev.langchain4j.data.message.AiMessage> handler)
generate
in interface dev.langchain4j.model.chat.StreamingChatLanguageModel
public void generate(List<dev.langchain4j.data.message.ChatMessage> messages, dev.langchain4j.agent.tool.ToolSpecification toolSpecification, dev.langchain4j.model.StreamingResponseHandler<dev.langchain4j.data.message.AiMessage> handler)
generate
in interface dev.langchain4j.model.chat.StreamingChatLanguageModel
public int estimateTokenCount(List<dev.langchain4j.data.message.ChatMessage> messages)
estimateTokenCount
in interface dev.langchain4j.model.chat.TokenCountEstimator
public static OpenAiStreamingChatModel withApiKey(String apiKey)
public static OpenAiStreamingChatModel.OpenAiStreamingChatModelBuilder builder()
Copyright © 2024. All rights reserved.