package dev.langchain4j.service;
import dev.langchain4j.agent.tool.DefaultToolExecutor;
import dev.langchain4j.agent.tool.Tool;
import dev.langchain4j.agent.tool.ToolSpecification;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.ToolExecutionResultMessage;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.memory.chat.ChatMemoryProvider;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.chat.StreamingChatLanguageModel;
import dev.langchain4j.model.input.structured.StructuredPrompt;
import dev.langchain4j.model.moderation.Moderation;
import dev.langchain4j.model.moderation.ModerationModel;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.rag.DefaultRetrievalAugmentor;
import dev.langchain4j.rag.RetrievalAugmentor;
import dev.langchain4j.rag.content.retriever.ContentRetriever;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.retriever.Retriever;
import dev.langchain4j.spi.services.AiServicesFactory;
import java.lang.reflect.Method;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import static dev.langchain4j.agent.tool.ToolSpecifications.toolSpecificationFrom;
import static dev.langchain4j.exception.IllegalConfigurationException.illegalConfiguration;
import static dev.langchain4j.internal.ValidationUtils.ensureNotNull;
import static dev.langchain4j.spi.ServiceHelper.loadFactories;
import static java.util.stream.Collectors.toList;
/**
* AI Services provide a simpler and more flexible alternative to chains.
* You can define your own API (a Java interface with one or more methods),
* and AiServices will provide an implementation for it (we call this "AI Service").
* <p>
* Currently, AI Services support:
* <pre>
* - Prompt templates for user and system messages using {@link UserMessage} and {@link SystemMessage}
* - Structured prompts as method arguments (see {@link StructuredPrompt})
* - Shared or per-user (see {@link MemoryId}) chat memory
* - RAG (see {@link RetrievalAugmentor})
* - Tools (see {@link Tool})
* - Various return types (output parsers), see below
* - Streaming (use {@link TokenStream} as a return type)
* - Auto-moderation using {@link Moderate}
* </pre>
* <p>
* Here is the simplest example of an AI Service:
*
* <pre>
* interface Assistant {
*
* String chat(String userMessage);
* }
*
* Assistant assistant = AiServices.create(Assistant.class, model);
*
* String answer = assistant.chat("hello");
* System.out.println(answer); // Hello, how can I help you today?
* </pre>
*
* <pre>
* The return type of methods in your AI Service can be any of the following:
* - a {@link String}, an {@link AiMessage} or a {@code Response<AiMessage>}, if you want to get the answer from the LLM as-is
* - a {@code List<String>} or {@code Set<String>}, if you want to receive the answer as a collection of items or bullet points
* - any {@link Enum} or a {@code boolean}, if you want to use the LLM for classification
* - a primitive or boxed Java type: {@code int}, {@code Double}, etc., if you want to use the LLM for data extraction
* - many default Java types: {@code Date}, {@code LocalDateTime}, {@code BigDecimal}, etc., if you want to use the LLM for data extraction
* - any custom POJO, if you want to use the LLM for data extraction.
* For POJOs, it is advisable to use the "json mode" feature if the LLM provider supports it. For OpenAI, this can be enabled by calling {@code responseFormat("json_object")} during model construction.
*
* </pre>
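 * <p>
 * For example, data extraction into a custom POJO (a minimal sketch; {@code Person} here is a hypothetical class):
 * <pre>
 * class Person {
 *
 *     String firstName;
 *     String lastName;
 * }
 *
 * interface PersonExtractor {
 *
 *     {@code @UserMessage}("Extract information about a person from the following text: {{it}}")
 *     Person extractPersonFrom(String text);
 * }
 *
 * PersonExtractor extractor = AiServices.create(PersonExtractor.class, model);
 *
 * Person person = extractor.extractPersonFrom("John Doe, born in 1990, lives in Berlin.");
 * </pre>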
* <p>
* Let's see how we can classify the sentiment of a text:
* <pre>
* enum Sentiment {
* POSITIVE, NEUTRAL, NEGATIVE
* }
*
* interface SentimentAnalyzer {
*
* {@code @UserMessage}("Analyze sentiment of {{it}}")
* Sentiment analyzeSentimentOf(String text);
* }
*
 * SentimentAnalyzer sentimentAnalyzer = AiServices.create(SentimentAnalyzer.class, model);
 *
 * Sentiment sentiment = sentimentAnalyzer.analyzeSentimentOf("I love you");
 * System.out.println(sentiment); // POSITIVE
* </pre>
* <p>
* As demonstrated, you can put {@link UserMessage} and {@link SystemMessage} annotations above a method to define
* templates for user and system messages, respectively.
* In this example, the special {@code {{it}}} prompt template variable is used because there's only one method parameter.
* However, you can use more parameters as demonstrated in the following example:
* <pre>
* interface Translator {
*
* {@code @SystemMessage}("You are a professional translator into {{language}}")
* {@code @UserMessage}("Translate the following text: {{text}}")
* String translate(@V("text") String text, @V("language") String language);
* }
* </pre>
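 * <p>
 * If you configure a {@link StreamingChatLanguageModel} instead of a {@link ChatLanguageModel},
 * you can stream the response token by token (a minimal sketch; {@code streamingModel} is assumed
 * to be an already-configured streaming chat model):
 * <pre>
 * interface Assistant {
 *
 *     TokenStream chat(String userMessage);
 * }
 *
 * Assistant assistant = AiServices.create(Assistant.class, streamingModel);
 *
 * assistant.chat("Tell me a joke")
 *         .onNext(System.out::print)
 *         .onComplete(response -> System.out.println())
 *         .onError(Throwable::printStackTrace)
 *         .start();
 * </pre>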
* <p>
* See more examples <a href="https://github.com/langchain4j/langchain4j-examples/tree/main/other-examples/src/main/java">here</a>.
*
* @param <T> The interface for which AiServices will provide an implementation.
*/
public abstract class AiServices<T> {
protected static final String DEFAULT = "default";
protected final AiServiceContext context;
private boolean retrieverSet = false;
private boolean contentRetrieverSet = false;
private boolean retrievalAugmentorSet = false;
protected AiServices(AiServiceContext context) {
this.context = context;
}
/**
 * Creates an AI Service (an implementation of the provided interface) that is backed by the provided chat model.
* This convenience method can be used to create simple AI Services.
* For more complex cases, please use {@link #builder}.
*
* @param aiService The class of the interface to be implemented.
* @param chatLanguageModel The chat model to be used under the hood.
* @return An instance of the provided interface, implementing all its defined methods.
*/
public static <T> T create(Class<T> aiService, ChatLanguageModel chatLanguageModel) {
return builder(aiService)
.chatLanguageModel(chatLanguageModel)
.build();
}
/**
 * Creates an AI Service (an implementation of the provided interface) that is backed by the provided streaming chat model.
* This convenience method can be used to create simple AI Services.
* For more complex cases, please use {@link #builder}.
*
* @param aiService The class of the interface to be implemented.
* @param streamingChatLanguageModel The streaming chat model to be used under the hood.
* The return type of all methods should be {@link TokenStream}.
* @return An instance of the provided interface, implementing all its defined methods.
*/
public static <T> T create(Class<T> aiService, StreamingChatLanguageModel streamingChatLanguageModel) {
return builder(aiService)
.streamingChatLanguageModel(streamingChatLanguageModel)
.build();
}
/**
* Begins the construction of an AI Service.
*
* @param aiService The class of the interface to be implemented.
* @return builder
*/
public static <T> AiServices<T> builder(Class<T> aiService) {
AiServiceContext context = new AiServiceContext(aiService);
for (AiServicesFactory factory : loadFactories(AiServicesFactory.class)) {
return factory.create(context);
}
return new DefaultAiServices<>(context);
}
/**
 * Configures the chat model that will be used under the hood of the AI Service.
* <p>
* Either {@link ChatLanguageModel} or {@link StreamingChatLanguageModel} should be configured,
* but not both at the same time.
*
* @param chatLanguageModel Chat model that will be used under the hood of the AI Service.
* @return builder
*/
public AiServices<T> chatLanguageModel(ChatLanguageModel chatLanguageModel) {
context.chatModel = chatLanguageModel;
return this;
}
/**
 * Configures the streaming chat model that will be used under the hood of the AI Service.
* The methods of the AI Service must return a {@link TokenStream} type.
* <p>
* Either {@link ChatLanguageModel} or {@link StreamingChatLanguageModel} should be configured,
* but not both at the same time.
*
* @param streamingChatLanguageModel Streaming chat model that will be used under the hood of the AI Service.
* @return builder
*/
public AiServices<T> streamingChatLanguageModel(StreamingChatLanguageModel streamingChatLanguageModel) {
context.streamingChatModel = streamingChatLanguageModel;
return this;
}
/**
* Configures the chat memory that will be used to preserve conversation history between method calls.
* <p>
* Unless a {@link ChatMemory} or {@link ChatMemoryProvider} is configured, all method calls will be independent of each other.
* In other words, the LLM will not remember the conversation from the previous method calls.
* <p>
* The same {@link ChatMemory} instance will be used for every method call.
* <p>
* If you want to have a separate {@link ChatMemory} for each user/conversation, configure {@link #chatMemoryProvider} instead.
* <p>
* Either a {@link ChatMemory} or a {@link ChatMemoryProvider} can be configured, but not both simultaneously.
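 * <p>
 * Example (a minimal sketch; {@code model} is an already-configured chat model):
 * <pre>
 * Assistant assistant = AiServices.builder(Assistant.class)
 *         .chatLanguageModel(model)
 *         .chatMemory(MessageWindowChatMemory.withMaxMessages(10))
 *         .build();
 * </pre>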
*
* @param chatMemory An instance of chat memory to be used by the AI Service.
* @return builder
*/
public AiServices<T> chatMemory(ChatMemory chatMemory) {
context.chatMemories = new ConcurrentHashMap<>();
context.chatMemories.put(DEFAULT, chatMemory);
return this;
}
/**
* Configures the chat memory provider, which provides a dedicated instance of {@link ChatMemory} for each user/conversation.
* To distinguish between users/conversations, one of the method's arguments should be a memory ID (of any data type)
* annotated with {@link MemoryId}.
* For each new (previously unseen) memoryId, an instance of {@link ChatMemory} will be automatically obtained
* by invoking {@link ChatMemoryProvider#get(Object id)}.
* Example:
* <pre>
* interface Assistant {
*
* String chat(@MemoryId int memoryId, @UserMessage String message);
* }
* </pre>
* If you prefer to use the same (shared) {@link ChatMemory} for all users/conversations, configure a {@link #chatMemory} instead.
* <p>
* Either a {@link ChatMemory} or a {@link ChatMemoryProvider} can be configured, but not both simultaneously.
*
* @param chatMemoryProvider The provider of a {@link ChatMemory} for each new user/conversation.
* @return builder
*/
public AiServices<T> chatMemoryProvider(ChatMemoryProvider chatMemoryProvider) {
context.chatMemories = new ConcurrentHashMap<>();
context.chatMemoryProvider = chatMemoryProvider;
return this;
}
/**
* Configures a moderation model to be used for automatic content moderation.
* If a method in the AI Service is annotated with {@link Moderate}, the moderation model will be invoked
* to check the user content for any inappropriate or harmful material.
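 * <p>
 * Example (a minimal sketch; {@code model} and {@code moderationModel} are assumed to be configured elsewhere):
 * <pre>
 * interface Assistant {
 *
 *     {@code @Moderate}
 *     String chat(String userMessage);
 * }
 *
 * Assistant assistant = AiServices.builder(Assistant.class)
 *         .chatLanguageModel(model)
 *         .moderationModel(moderationModel)
 *         .build();
 * </pre>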
*
* @param moderationModel The moderation model to be used for content moderation.
* @return builder
* @see Moderate
*/
public AiServices<T> moderationModel(ModerationModel moderationModel) {
context.moderationModel = moderationModel;
return this;
}
/**
* Configures the tools that the LLM can use.
* A {@link ChatMemory} that can hold at least 3 messages is required for the tools to work properly.
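 * <p>
 * Example (a minimal sketch; {@code Calculator} is a hypothetical tool class):
 * <pre>
 * class Calculator {
 *
 *     {@code @Tool}("Sums two numbers")
 *     double add(double a, double b) {
 *         return a + b;
 *     }
 * }
 *
 * Assistant assistant = AiServices.builder(Assistant.class)
 *         .chatLanguageModel(model)
 *         .chatMemory(MessageWindowChatMemory.withMaxMessages(10))
 *         .tools(new Calculator())
 *         .build();
 * </pre>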
*
* @param objectsWithTools One or more objects whose methods are annotated with {@link Tool}.
* All these tools (methods annotated with {@link Tool}) will be accessible to the LLM.
* Note that inherited methods are ignored.
* @return builder
* @see Tool
*/
public AiServices<T> tools(Object... objectsWithTools) {
return tools(Arrays.asList(objectsWithTools));
}
/**
* Configures the tools that the LLM can use.
* A {@link ChatMemory} that can hold at least 3 messages is required for the tools to work properly.
*
* @param objectsWithTools A list of objects whose methods are annotated with {@link Tool}.
* All these tools (methods annotated with {@link Tool}) are accessible to the LLM.
* Note that inherited methods are ignored.
* @return builder
* @see Tool
*/
public AiServices<T> tools(List<Object> objectsWithTools) {
context.toolSpecifications = new ArrayList<>();
context.toolExecutors = new HashMap<>();
for (Object objectWithTool : objectsWithTools) {
for (Method method : objectWithTool.getClass().getDeclaredMethods()) {
if (method.isAnnotationPresent(Tool.class)) {
ToolSpecification toolSpecification = toolSpecificationFrom(method);
context.toolSpecifications.add(toolSpecification);
context.toolExecutors.put(toolSpecification.name(), new DefaultToolExecutor(objectWithTool, method));
}
}
}
return this;
}
/**
* Deprecated. Use {@link #contentRetriever(ContentRetriever)}
* (e.g. {@link EmbeddingStoreContentRetriever}) instead.
* <br>
* Configures a retriever that will be invoked on every method call to fetch relevant information
* related to the current user message from an underlying source (e.g., embedding store).
* This relevant information is automatically injected into the message sent to the LLM.
*
* @param retriever The retriever to be used by the AI Service.
* @return builder
*/
@Deprecated
public AiServices<T> retriever(Retriever<TextSegment> retriever) {
if (contentRetrieverSet || retrievalAugmentorSet) {
throw illegalConfiguration("Only one out of [retriever, contentRetriever, retrievalAugmentor] can be set");
}
if (retriever != null) {
AiServices<T> withContentRetriever = contentRetriever(retriever.toContentRetriever());
retrieverSet = true;
return withContentRetriever;
}
return this;
}
/**
* Configures a content retriever to be invoked on every method call for retrieving relevant content
* related to the user's message from an underlying data source
* (e.g., an embedding store in the case of an {@link EmbeddingStoreContentRetriever}).
* The retrieved relevant content is then automatically incorporated into the message sent to the LLM.
* <br>
* This method provides a straightforward approach for those who do not require
* a customized {@link RetrievalAugmentor}.
* It configures a {@link DefaultRetrievalAugmentor} with the provided {@link ContentRetriever}.
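 * <p>
 * Example (a minimal sketch; {@code embeddingStore} and {@code embeddingModel} are assumed to be configured elsewhere):
 * <pre>
 * ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder()
 *         .embeddingStore(embeddingStore)
 *         .embeddingModel(embeddingModel)
 *         .maxResults(2)
 *         .build();
 *
 * Assistant assistant = AiServices.builder(Assistant.class)
 *         .chatLanguageModel(model)
 *         .contentRetriever(contentRetriever)
 *         .build();
 * </pre>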
*
* @param contentRetriever The content retriever to be used by the AI Service.
* @return builder
*/
public AiServices<T> contentRetriever(ContentRetriever contentRetriever) {
if (retrieverSet || retrievalAugmentorSet) {
throw illegalConfiguration("Only one out of [retriever, contentRetriever, retrievalAugmentor] can be set");
}
contentRetrieverSet = true;
context.retrievalAugmentor = DefaultRetrievalAugmentor.builder()
.contentRetriever(ensureNotNull(contentRetriever, "contentRetriever"))
.build();
return this;
}
/**
* Configures a retrieval augmentor to be invoked on every method call.
*
* @param retrievalAugmentor The retrieval augmentor to be used by the AI Service.
* @return builder
*/
public AiServices<T> retrievalAugmentor(RetrievalAugmentor retrievalAugmentor) {
if (retrieverSet || contentRetrieverSet) {
throw illegalConfiguration("Only one out of [retriever, contentRetriever, retrievalAugmentor] can be set");
}
retrievalAugmentorSet = true;
context.retrievalAugmentor = ensureNotNull(retrievalAugmentor, "retrievalAugmentor");
return this;
}
/**
* Constructs and returns the AI Service.
*
* @return An instance of the AI Service implementing the specified interface.
*/
public abstract T build();
protected void performBasicValidation() {
if (context.chatModel == null && context.streamingChatModel == null) {
throw illegalConfiguration("Please specify either chatLanguageModel or streamingChatLanguageModel");
}
if (context.toolSpecifications != null && !context.hasChatMemory()) {
throw illegalConfiguration(
"Please set up chatMemory or chatMemoryProvider in order to use tools. "
+ "A ChatMemory that can hold at least 3 messages is required for the tools to work properly. "
+ "While the LLM can technically execute a tool without chat memory, if it only receives the " +
"result of the tool's execution without the initial message from the user, it won't interpret " +
"the result properly."
);
}
}
public static List<ChatMessage> removeToolMessages(List<ChatMessage> messages) {
return messages.stream()
.filter(it -> !(it instanceof ToolExecutionResultMessage))
.filter(it -> !(it instanceof AiMessage && ((AiMessage) it).hasToolExecutionRequests()))
.collect(toList());
}
public static void verifyModerationIfNeeded(Future<Moderation> moderationFuture) {
if (moderationFuture != null) {
try {
Moderation moderation = moderationFuture.get();
if (moderation.flagged()) {
throw new ModerationException(String.format("Text \"%s\" violates content policy", moderation.flaggedText()));
}
} catch (InterruptedException | ExecutionException e) {
throw new RuntimeException(e);
}
}
}
}
package org.mfusco;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.service.AiServices;
import static java.time.Duration.ofSeconds;
public class MortgageChat {
private final ChatLanguageModel model;
private final PersonExtractor extractor;
private final DroolsMortgageCalculator droolsMortgageCalculator = new DroolsMortgageCalculator();
private final Assistant assistant;
public MortgageChat(String openAiApiKey) {
model = OpenAiChatModel.builder()
.apiKey(openAiApiKey)
.timeout(ofSeconds(60))
.build();
extractor = AiServices.create(PersonExtractor.class, model);
assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(model)
.chatMemory(MessageWindowChatMemory.withMaxMessages(10))
.tools(droolsMortgageCalculator)
.build();
}
public String chat(String text) {
return text.endsWith("?") ? assistant.chat(text) : extractPerson(text);
}
private String extractPerson(String text) {
Person person = extractor.extractPersonFrom(text);
droolsMortgageCalculator.register(person);
return person.toString();
}
}
package com.moyz.adi.common.service;
import com.moyz.adi.common.helper.LLMContext;
import com.moyz.adi.common.interfaces.TriConsumer;
import com.moyz.adi.common.util.AdiPgVectorEmbeddingStore;
import com.moyz.adi.common.vo.AnswerMeta;
import com.moyz.adi.common.vo.PromptMeta;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.input.Prompt;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.model.openai.OpenAiTokenizer;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.store.embedding.EmbeddingMatch;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.commons.lang3.tuple.Triple;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO;
import static java.util.stream.Collectors.joining;
@Slf4j
@Service
public class RAGService {
@Value("${spring.datasource.url}")
private String dataBaseUrl;
@Value("${spring.datasource.username}")
private String dataBaseUserName;
@Value("${spring.datasource.password}")
private String dataBasePassword;
private static final PromptTemplate promptTemplate = PromptTemplate.from("Answer the following question as accurately as possible: {{question}}\n\nBase your answer on the following content from the knowledge base:\n{{information}}");
private EmbeddingModel embeddingModel;
private EmbeddingStore<TextSegment> embeddingStore;
public void init() {
log.info("initEmbeddingModel");
embeddingModel = new AllMiniLmL6V2EmbeddingModel();
embeddingStore = initEmbeddingStore();
}
private EmbeddingStore<TextSegment> initEmbeddingStore() {
// Extract host, port and database name from the JDBC URL with a regular expression
String regex = "jdbc:postgresql://([^:/]+):(\\d+)/(\\w+).+";
Pattern pattern = Pattern.compile(regex);
Matcher matcher = pattern.matcher(dataBaseUrl);
String host = "";
String port = "";
String databaseName = "";
if (matcher.matches()) {
host = matcher.group(1);
port = matcher.group(2);
databaseName = matcher.group(3);
System.out.println("Host: " + host);
System.out.println("Port: " + port);
System.out.println("Database: " + databaseName);
} else {
throw new RuntimeException("Failed to parse host, port and database name from the JDBC URL: " + dataBaseUrl);
}
AdiPgVectorEmbeddingStore embeddingStore = AdiPgVectorEmbeddingStore.builder()
.host(host)
.port(Integer.parseInt(port))
.database(databaseName)
.user(dataBaseUserName)
.password(dataBasePassword)
.dimension(384)
.createTable(true)
.dropTableFirst(false)
.table("adi_knowledge_base_embedding")
.build();
return embeddingStore;
}
private EmbeddingStoreIngestor getEmbeddingStoreIngestor() {
DocumentSplitter documentSplitter = DocumentSplitters.recursive(1000, 0, new OpenAiTokenizer(GPT_3_5_TURBO));
EmbeddingStoreIngestor embeddingStoreIngestor = EmbeddingStoreIngestor.builder()
.documentSplitter(documentSplitter)
.embeddingModel(embeddingModel)
.embeddingStore(embeddingStore)
.build();
return embeddingStoreIngestor;
}
/**
* Splits the document into chunks and embeds them into the vector store.
*
* @param document the knowledge-base document
*/
public void ingest(Document document) {
getEmbeddingStoreIngestor().ingest(document);
}
public Prompt retrieveAndCreatePrompt(String kbUuid, String question) {
// Embed the question
Embedding questionEmbedding = embeddingModel.embed(question).content();
// Find relevant embeddings in embedding store by semantic similarity
// You can play with parameters below to find a sweet spot for your specific use case
int maxResults = 3;
double minScore = 0.6;
List<EmbeddingMatch<TextSegment>> relevantEmbeddings = ((AdiPgVectorEmbeddingStore) embeddingStore).findRelevantByKbUuid(kbUuid, questionEmbedding, maxResults, minScore);
// Create a prompt for the model that includes question and relevant embeddings
String information = relevantEmbeddings.stream()
.map(match -> match.embedded().text())
.collect(joining("\n\n"));
if (StringUtils.isBlank(information)) {
return null;
}
return promptTemplate.apply(Map.of("question", question, "information", Matcher.quoteReplacement(information)));
}
/**
* Retrieves relevant content from the knowledge base and asks the LLM.
*
* @param kbUuid    the knowledge base UUID
* @param question  the user's question
* @param modelName LLM model name
* @return a pair of the prompt text and the LLM response, or {@code null} if no relevant content was found
*/
public Pair<String, Response<AiMessage>> retrieveAndAsk(String kbUuid, String question, String modelName) {
Prompt prompt = retrieveAndCreatePrompt(kbUuid, question);
if (null == prompt) {
return null;
}
Response<AiMessage> response = new LLMContext(modelName).getLLMService().chat(prompt.toUserMessage());
return new ImmutablePair<>(prompt.text(), response);
}
}
package dev.zbendhiba.demo.telegram.openapi;
import java.util.List;
import dev.langchain4j.chain.ConversationalRetrievalChain;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.model.openai.OpenAiChatModel;
import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO;
import dev.langchain4j.retriever.EmbeddingStoreRetriever;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import jakarta.enterprise.context.ApplicationScoped;
import static java.time.Duration.ofSeconds;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.telegram.model.IncomingMessage;
import org.eclipse.microprofile.config.inject.ConfigProperty;
@ApplicationScoped
public class Routes extends RouteBuilder {
@ConfigProperty(name="open-api-key")
String openApiKey;
private EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel();
private EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
@Override
public void configure() throws Exception {
// REST endpoint to add a bio
rest("data")
.post("/camel-split-ingest/")
.to("direct:camel-split-ingest")
.post("/langchain4j-split-ingest/")
.to("direct:langchain4j-split-ingest");
// Ingest Data
from("direct:camel-split-ingest")
.wireTap("direct:processBio")
.transform().simple("Thanks");
from("direct:processBio")
// split the body into paragraphs
.split(body().tokenize("\\s*\\n\\s*\\n"))
.setHeader("paragraphNumber", simple("${exchangeProperty.CamelSplitIndex}"))
// Process each paragraph using the CamelSplitterProcessor
.process(new CamelSplitterProcessor())
.to("direct:processTokenizedPart")
.end();
// Embed paragraphs into Vector Database
from("direct:processTokenizedPart")
.process(exchange -> {
embed(exchange.getIn().getBody(List.class));
});
from("direct:process-langchain4j-split-ingest")
.process(new LangchainSplitterProcessor())
.to("direct:processTokenizedPart");
from("direct:langchain4j-split-ingest")
.wireTap("direct:process-langchain4j-split-ingest")
.transform().simple("Thanks");
ChatLanguageModel model = OpenAiChatModel.builder()
.apiKey(openApiKey)
.modelName(GPT_3_5_TURBO)
.temperature(0.3)
.timeout(ofSeconds(3000))
.build();
ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder()
.chatLanguageModel(model)
.retriever(EmbeddingStoreRetriever.from(embeddingStore, embeddingModel))
.chatMemory(MessageWindowChatMemory.withMaxMessages(10))
.promptTemplate(PromptTemplate
.from("Answer the following question to the best of your ability: {{question}}\n\nBase your answer on the following information:\n{{information}}"))
.build();
from("telegram:bots?timeout=30000")
.log("Text received in Telegram : ${body}")
// this is just a Hello World, we suppose that we receive only text messages from user
.filter(simple("${body} != '/start'"))
.process(e->{
IncomingMessage incomingMessage = e.getMessage().getBody(IncomingMessage.class);
var openapiMessage = chain.execute(incomingMessage.getText());
e.getMessage().setBody(openapiMessage);
})
.log("Text to send to user based on response from ChatGPT : ${body}")
.to("telegram:bots")
.end();
}
public void embed(List<TextSegment> textSegments ) {
List<Embedding> embeddings = embeddingModel.embedAll(textSegments).content();
embeddingStore.addAll(embeddings, textSegments);
}
}
| [
"dev.langchain4j.chain.ConversationalRetrievalChain.builder",
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((2918, 3122), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2918, 3097), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2918, 3055), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2918, 3021), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2918, 2979), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3171, 3658), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((3171, 3633), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((3171, 3413), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((3171, 3340), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((3171, 3251), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder')] |
package eu.luminis.faqlangchain.service;
import java.io.File;
import java.io.FileNotFoundException;
import java.time.Duration;
import java.util.Arrays;
import java.util.stream.Collectors;
import com.fasterxml.jackson.databind.JsonNode;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.io.FileSystemResource;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.http.client.MultipartBodyBuilder;
import org.springframework.stereotype.Service;
import org.springframework.util.ResourceUtils;
import org.springframework.web.reactive.function.BodyInserters;
import org.springframework.web.reactive.function.client.WebClient;
import reactor.core.publisher.Mono;
@Service
public class IngestService {
private static final Logger LOGGER = LoggerFactory.getLogger(IngestService.class);
private final WebClient webClient;
private final EmbeddingStore<TextSegment> embeddingStore;
private final EmbeddingModel embeddingModel;
public IngestService(@Value("${unstructured.apiKey}") String unstructuredApiKey,
@Qualifier("openaiModel") EmbeddingModel embeddingModel,
@Qualifier("inMemoryEmbeddingStore") EmbeddingStore<TextSegment> embeddingStore) {
this.embeddingModel = embeddingModel;
this.embeddingStore = embeddingStore;
this.webClient = WebClient.builder()
.baseUrl("https://api.unstructured.io/general/v0/")
.defaultHeader("unstructured-api-key", unstructuredApiKey)
.build();
}
public boolean ingestPDF() throws FileNotFoundException {
LOGGER.info("Ingesting PDF");
File file = ResourceUtils.getFile("classpath:data/faq.pdf");
MultipartBodyBuilder builder = new MultipartBodyBuilder();
builder.part("files", new FileSystemResource(file));
builder.part("strategy", "ocr_only");
builder.part("ocr_languages", "eng");
Mono<Object> mono = webClient.post()
.uri("general")
.contentType(MediaType.MULTIPART_FORM_DATA)
.body(BodyInserters.fromMultipartData(builder.build()))
.exchangeToMono(response -> {
if (response.statusCode().equals(HttpStatus.OK)) {
return response.bodyToMono(UnstructuredResponse[].class);
} else {
LOGGER.error("Something went wrong when uploading file to Unstructured API. Received status code {}", response.statusCode());
return response.bodyToMono(JsonNode.class);
}
});
Object response = mono.block(Duration.ofMinutes(1));
if (response instanceof JsonNode jsonNode) {
LOGGER.error("Response: {}", jsonNode);
return false;
}
if (response instanceof UnstructuredResponse[] unstructuredResponses) {
String text = Arrays.stream(unstructuredResponses).map(UnstructuredResponse::getText).collect(Collectors.joining(" "));
Document document = Document.from(text);
DocumentSplitter documentSplitter = DocumentSplitters.recursive(300);
EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
.documentSplitter(documentSplitter)
.embeddingModel(embeddingModel)
.embeddingStore(embeddingStore)
.build();
ingestor.ingest(document);
LOGGER.info("Ingestion of PDF finished");
return true;
}
return false;
}
}
package org.agoncal.fascicle.langchain4j.accessing.vertexai;
import dev.langchain4j.model.vertexai.VertexAiChatModel;
// tag::adocSkip[]
/**
* @author Antonio Goncalves
* http://www.antoniogoncalves.org
* --
*/
// end::adocSkip[]
public class MusicianService {
public static void main(String[] args) {
MusicianService musicianService = new MusicianService();
musicianService.useVertexAiLanguageModelBuilder();
}
private static final String AZURE_OPENAI_KEY = System.getenv("AZURE_OPENAI_KEY");
private static final String AZURE_OPENAI_ENDPOINT = System.getenv("AZURE_OPENAI_ENDPOINT");
private static final String AZURE_OPENAI_DEPLOYMENT_NAME = System.getenv("AZURE_OPENAI_DEPLOYMENT_NAME");
private static final String PROMPT = "When was the first Beatles album released?";
// ################################
// ### VERTEX AI LANGUAGE MODEL ###
// ################################
public void useVertexAiLanguageModelBuilder() {
System.out.println("### useVertexAiLanguageModelBuilder");
// tag::adocSnippet[]
VertexAiChatModel model = VertexAiChatModel.builder()
.endpoint(AZURE_OPENAI_ENDPOINT)
.temperature(0.3)
.build();
// end::adocSnippet[]
String completion = model.generate(PROMPT);
}
}
package com.example.application;
import com.example.application.services.BookingTools;
import com.example.application.services.CustomerSupportAgent;
import com.vaadin.flow.component.page.AppShellConfigurator;
import com.vaadin.flow.theme.Theme;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.parser.TextDocumentParser;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.chat.TokenWindowChatMemory;
import dev.langchain4j.model.Tokenizer;
import dev.langchain4j.model.chat.StreamingChatLanguageModel;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import dev.langchain4j.model.openai.OpenAiTokenizer;
import dev.langchain4j.rag.content.retriever.ContentRetriever;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.CommandLineRunner;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.Bean;
import org.springframework.core.io.Resource;
import org.springframework.core.io.ResourceLoader;
import java.io.IOException;
import static dev.langchain4j.data.document.loader.FileSystemDocumentLoader.loadDocument;
import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO;
import static dev.langchain4j.model.openai.OpenAiModelName.GPT_4;
@SpringBootApplication
@Theme(value = "customer-service-chatbot")
public class Application implements AppShellConfigurator {
public static void main(String[] args) {
SpringApplication.run(Application.class, args);
}
@Bean
EmbeddingModel embeddingModel() {
return new AllMiniLmL6V2EmbeddingModel();
}
@Bean
EmbeddingStore<TextSegment> embeddingStore() {
return new InMemoryEmbeddingStore<>();
}
@Bean
Tokenizer tokenizer() {
return new OpenAiTokenizer(GPT_3_5_TURBO);
}
// In the real world, ingesting documents would often happen separately, on a CI server or similar
@Bean
CommandLineRunner docsToEmbeddings(
EmbeddingModel embeddingModel,
EmbeddingStore<TextSegment> embeddingStore,
Tokenizer tokenizer,
ResourceLoader resourceLoader
) throws IOException {
return args -> {
Resource resource =
resourceLoader.getResource("classpath:terms-of-service.txt");
var termsOfUse = loadDocument(resource.getFile().toPath(), new TextDocumentParser());
DocumentSplitter documentSplitter = DocumentSplitters.recursive(200, 0,
tokenizer);
EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
.documentSplitter(documentSplitter)
.embeddingModel(embeddingModel)
.embeddingStore(embeddingStore)
.build();
ingestor.ingest(termsOfUse);
};
}
@Bean
StreamingChatLanguageModel chatLanguageModel() {
return OpenAiStreamingChatModel.builder()
.apiKey(ApiKeys.OPENAI_API_KEY)
.modelName(GPT_3_5_TURBO)
.build();
}
@Bean
ContentRetriever retriever(
EmbeddingStore<TextSegment> embeddingStore,
EmbeddingModel embeddingModel
) {
return EmbeddingStoreContentRetriever.builder()
.embeddingStore(embeddingStore)
.embeddingModel(embeddingModel)
.maxResults(2)
.minScore(0.6)
.build();
}
@Bean
CustomerSupportAgent customerSupportAgent(
StreamingChatLanguageModel chatLanguageModel,
Tokenizer tokenizer,
ContentRetriever retriever,
BookingTools tools
) {
return AiServices.builder(CustomerSupportAgent.class)
.streamingChatLanguageModel(chatLanguageModel)
.chatMemoryProvider(chatId -> TokenWindowChatMemory.builder()
.id(chatId)
.maxTokens(1000, tokenizer)
.build())
.contentRetriever(retriever)
.tools(tools)
.build();
}
}
package com.tencent.supersonic.headless.core.chat.parser.llm;
import com.tencent.supersonic.common.util.JsonUtil;
import com.tencent.supersonic.headless.core.config.OptimizationConfig;
import com.tencent.supersonic.headless.core.chat.query.llm.s2sql.LLMReq;
import com.tencent.supersonic.headless.core.chat.query.llm.s2sql.LLMReq.SqlGenerationMode;
import com.tencent.supersonic.headless.core.chat.query.llm.s2sql.LLMResp;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.input.Prompt;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.model.output.Response;
import lombok.extern.slf4j.Slf4j;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@Service
@Slf4j
public class TwoPassSqlGeneration implements SqlGeneration, InitializingBean {
private static final Logger keyPipelineLog = LoggerFactory.getLogger("keyPipeline");
@Autowired
private ChatLanguageModel chatLanguageModel;
@Autowired
private SqlExamplarLoader sqlExamplarLoader;
@Autowired
private OptimizationConfig optimizationConfig;
@Autowired
private SqlPromptGenerator sqlPromptGenerator;
@Override
public LLMResp generation(LLMReq llmReq, Long dataSetId) {
keyPipelineLog.info("dataSetId:{},llmReq:{}", dataSetId, llmReq);
List<Map<String, String>> sqlExamples = sqlExamplarLoader.retrieverSqlExamples(llmReq.getQueryText(),
optimizationConfig.getText2sqlExampleNum());
String linkingPromptStr = sqlPromptGenerator.generateLinkingPrompt(llmReq, sqlExamples);
Prompt prompt = PromptTemplate.from(JsonUtil.toString(linkingPromptStr)).apply(new HashMap<>());
keyPipelineLog.info("step one request prompt:{}", prompt.toSystemMessage());
Response<AiMessage> response = chatLanguageModel.generate(prompt.toSystemMessage());
keyPipelineLog.info("step one model response:{}", response.content().text());
String schemaLinkStr = OutputFormat.getSchemaLink(response.content().text());
String generateSqlPrompt = sqlPromptGenerator.generateSqlPrompt(llmReq, schemaLinkStr, sqlExamples);
Prompt sqlPrompt = PromptTemplate.from(JsonUtil.toString(generateSqlPrompt)).apply(new HashMap<>());
keyPipelineLog.info("step two request prompt:{}", sqlPrompt.toSystemMessage());
Response<AiMessage> sqlResult = chatLanguageModel.generate(sqlPrompt.toSystemMessage());
String result = sqlResult.content().text();
keyPipelineLog.info("step two model response:{}", result);
Map<String, Double> sqlMap = new HashMap<>();
sqlMap.put(result, 1D);
keyPipelineLog.info("schemaLinkStr:{},sqlMap:{}", schemaLinkStr, sqlMap);
LLMResp llmResp = new LLMResp();
llmResp.setQuery(llmReq.getQueryText());
llmResp.setSqlRespMap(OutputFormat.buildSqlRespMap(sqlExamples, sqlMap));
return llmResp;
}
@Override
public void afterPropertiesSet() {
SqlGenerationFactory.addSqlGenerationForFactory(SqlGenerationMode.TWO_PASS_AUTO_COT, this);
}
}
package com.sg.chatbot.service;
import org.springframework.http.codec.ServerSentEvent;
import dev.langchain4j.memory.chat.TokenWindowChatMemory;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import dev.langchain4j.model.openai.OpenAiTokenizer;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.TokenStream;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Sinks;
@Service
public class ChatService {
private String openaiApiKey = System.getenv("OPENAI_API_KEY");
private Assistant assistant;
private StreamingAssistant streamingAssistant;
interface Assistant {
String chat(String message);
}
interface StreamingAssistant {
TokenStream chat(String message);
}
public ChatService(){
if (openaiApiKey == null) {
System.err.println("ERROR: OPENAI_API_KEY environment variable is not set. Please set it to your OpenAI API key.");
}
var memory = TokenWindowChatMemory.withMaxTokens(2000, new OpenAiTokenizer("gpt-3.5-turbo"));
assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(OpenAiChatModel.withApiKey(openaiApiKey))
.chatMemory(memory)
.build();
streamingAssistant = AiServices.builder(StreamingAssistant.class)
.streamingChatLanguageModel(OpenAiStreamingChatModel.withApiKey(openaiApiKey))
.chatMemory(memory)
.build();
}
public String chat(String message) {
System.out.println(message);
return assistant.chat(message);
}
public Flux<ServerSentEvent<String>> chatStream(String message) {
Sinks.Many<String> sink = Sinks.many().unicast().onBackpressureBuffer();
streamingAssistant.chat(message)
.onNext(sink::tryEmitNext)
.onComplete(c -> sink.tryEmitComplete())
.onError(sink::tryEmitError)
.start();
return sink.asFlux().map(mes -> ServerSentEvent.<String>builder()
.event("chat")
.data(mes)
.build());
}
}
package dev.langchain4j.model.azure;
import com.azure.ai.openai.models.*;
import dev.langchain4j.agent.tool.ToolExecutionRequest;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.model.Tokenizer;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.model.output.TokenUsage;
import java.util.List;
import static dev.langchain4j.model.azure.InternalAzureOpenAiHelper.finishReasonFrom;
import static java.util.Collections.singletonList;
/**
* This class needs to be thread safe because it is called when a streaming result comes back
* and there is no guarantee that this thread will be the same as the one that initiated the request,
* in fact it almost certainly won't be.
*/
class AzureOpenAiStreamingResponseBuilder {
private final StringBuffer contentBuilder = new StringBuffer();
private final StringBuffer toolNameBuilder = new StringBuffer();
private final StringBuffer toolArgumentsBuilder = new StringBuffer();
private volatile CompletionsFinishReason finishReason;
private final Integer inputTokenCount;
public AzureOpenAiStreamingResponseBuilder(Integer inputTokenCount) {
this.inputTokenCount = inputTokenCount;
}
public void append(ChatCompletions completions) {
if (completions == null) {
return;
}
List<ChatChoice> choices = completions.getChoices();
if (choices == null || choices.isEmpty()) {
return;
}
ChatChoice chatCompletionChoice = choices.get(0);
if (chatCompletionChoice == null) {
return;
}
CompletionsFinishReason finishReason = chatCompletionChoice.getFinishReason();
if (finishReason != null) {
this.finishReason = finishReason;
}
com.azure.ai.openai.models.ChatResponseMessage delta = chatCompletionChoice.getDelta();
if (delta == null) {
return;
}
String content = delta.getContent();
if (content != null) {
contentBuilder.append(content);
return;
}
FunctionCall functionCall = delta.getFunctionCall();
if (functionCall != null) {
if (functionCall.getName() != null) {
toolNameBuilder.append(functionCall.getName());
}
if (functionCall.getArguments() != null) {
toolArgumentsBuilder.append(functionCall.getArguments());
}
}
}
public void append(Completions completions) {
if (completions == null) {
return;
}
List<Choice> choices = completions.getChoices();
if (choices == null || choices.isEmpty()) {
return;
}
Choice completionChoice = choices.get(0);
if (completionChoice == null) {
return;
}
CompletionsFinishReason completionsFinishReason = completionChoice.getFinishReason();
if (completionsFinishReason != null) {
this.finishReason = completionsFinishReason;
}
String token = completionChoice.getText();
if (token != null) {
contentBuilder.append(token);
}
}
public Response<AiMessage> build(Tokenizer tokenizer, boolean forcefulToolExecution) {
String content = contentBuilder.toString();
if (!content.isEmpty()) {
return Response.from(
AiMessage.from(content),
tokenUsage(content, tokenizer),
finishReasonFrom(finishReason)
);
}
String toolName = toolNameBuilder.toString();
if (!toolName.isEmpty()) {
ToolExecutionRequest toolExecutionRequest = ToolExecutionRequest.builder()
.name(toolName)
.arguments(toolArgumentsBuilder.toString())
.build();
return Response.from(
AiMessage.from(toolExecutionRequest),
tokenUsage(toolExecutionRequest, tokenizer, forcefulToolExecution),
finishReasonFrom(finishReason)
);
}
return null;
}
private TokenUsage tokenUsage(String content, Tokenizer tokenizer) {
if (tokenizer == null) {
return null;
}
int outputTokenCount = tokenizer.estimateTokenCountInText(content);
return new TokenUsage(inputTokenCount, outputTokenCount);
}
private TokenUsage tokenUsage(ToolExecutionRequest toolExecutionRequest, Tokenizer tokenizer, boolean forcefulToolExecution) {
if (tokenizer == null) {
return null;
}
int outputTokenCount = 0;
if (forcefulToolExecution) {
// OpenAI calculates output tokens differently when tool is executed forcefully
outputTokenCount += tokenizer.estimateTokenCountInForcefulToolExecutionRequest(toolExecutionRequest);
} else {
outputTokenCount = tokenizer.estimateTokenCountInToolExecutionRequests(singletonList(toolExecutionRequest));
}
return new TokenUsage(inputTokenCount, outputTokenCount);
}
}
package dev.nano.sbot.configuration;
import dev.langchain4j.chain.ConversationalRetrievalChain;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.retriever.EmbeddingStoreRetriever;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import dev.nano.sbot.retriever.EmbeddingStoreLoggingRetriever;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import java.time.Duration;
import java.util.List;
import static dev.nano.sbot.constant.Constants.PROMPT_TEMPLATE_2;
@Configuration
@RequiredArgsConstructor
@Slf4j
public class LangChainConfiguration {
@Value("${langchain.api.key}")
private String apiKey;
@Value("${langchain.timeout}")
private Long timeout;
private final List<Document> documents;
@Bean
public ConversationalRetrievalChain chain() {
EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel();
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
.documentSplitter(DocumentSplitters.recursive(500, 0))
.embeddingModel(embeddingModel)
.embeddingStore(embeddingStore)
.build();
log.info("Ingesting Spring Boot Resources ...");
ingestor.ingest(documents);
log.info("Ingested {} documents", documents.size());
EmbeddingStoreRetriever retriever = EmbeddingStoreRetriever.from(embeddingStore, embeddingModel);
EmbeddingStoreLoggingRetriever loggingRetriever = new EmbeddingStoreLoggingRetriever(retriever);
/*MessageWindowChatMemory chatMemory = MessageWindowChatMemory.builder()
.maxMessages(10)
.build();*/
log.info("Building ConversationalRetrievalChain ...");
ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder()
.chatLanguageModel(OpenAiChatModel.builder()
.apiKey(apiKey)
.timeout(Duration.ofSeconds(timeout))
.build()
)
.promptTemplate(PromptTemplate.from(PROMPT_TEMPLATE_2))
//.chatMemory(chatMemory)
.retriever(loggingRetriever)
.build();
log.info("Spring Boot knowledge base is ready!");
return chain;
}
}
package com.nexus.backend.service;
import com.nexus.backend.dto.UserTender;
import com.nexus.backend.entity.Act;
import com.nexus.backend.entity.Tender;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.input.Prompt;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.model.openai.OpenAiChatModel;
import org.springframework.stereotype.Service;
import java.util.HashMap;
import java.util.Map;
import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO;
@Service
public class AiService {
public void testGpt(){
PromptTemplate promptTemplate = PromptTemplate
.from("Tell me a {{adjective}} joke about {{content}}..");
Map<String, Object> variables = new HashMap<>();
variables.put("adjective", "funny");
variables.put("content", "computers");
Prompt prompt = promptTemplate.apply(variables);
ChatLanguageModel model = OpenAiChatModel.builder()
.apiKey("KEY").modelName(GPT_3_5_TURBO)
.temperature(0.3)
.build();
String response = model.generate(prompt.text());
System.out.println(response);
}
public String checkIfCompliant(Act act, UserTender userTender) {
PromptTemplate promptTemplate = PromptTemplate
.from("This is a government act with a set of compliances {{act}}, With keeping this above act in mind, tell me if my tender/plan seems broadly compliant or not. " +
"Consider this tender/plan: {{tender}}" +
"Let me know if there are any shortcomings and where the tender/plan is not compliant. Also tell me about penalties.");
Map<String, Object> variables = new HashMap<>();
variables.put("act", act);
variables.put("tender", userTender);
Prompt prompt = promptTemplate.apply(variables);
ChatLanguageModel model = OpenAiChatModel.builder()
.apiKey("API_KEY")
.modelName(GPT_3_5_TURBO)
.temperature(0.3)
.build();
String response = model.generate(prompt.text());
System.out.println(response);
return response;
}
    public void summarise() {
        // TODO: not implemented yet
    }
public String checkIfTenderIsCompliant(Tender tender, String userTender) {
PromptTemplate promptTemplate = PromptTemplate
.from("This is a government Tender with a set of compliances {{tender}}. With keeping this above act in mind, tell me if my tender seems broadly compliant or not. " +
"Consider this tender/plan: {{userTender}}" +
"Let me know if there are any shortcomings and where the tender is not compliant. Also tell me about penalties.");
Map<String, Object> variables = new HashMap<>();
variables.put("tender", tender.toString());
variables.put("userTender", userTender.toString());
Prompt prompt = promptTemplate.apply(variables);
ChatLanguageModel model = OpenAiChatModel.builder()
.apiKey("KEY")
.modelName(GPT_3_5_TURBO)
.temperature(0.3)
.build();
String response = model.generate(prompt.text());
System.out.println(response);
return response;
}
}
| [
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((957, 1097), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((957, 1072), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((957, 1038), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((957, 1013), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1948, 2109), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1948, 2084), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1948, 2050), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1948, 2008), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3065, 3222), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3065, 3197), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3065, 3163), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3065, 3121), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')] |
package eu.luminis.faqlangchain.config;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.inprocess.InProcessEmbeddingModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiEmbeddingModel;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import dev.langchain4j.store.embedding.weaviate.WeaviateEmbeddingStore;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import static dev.langchain4j.model.inprocess.InProcessEmbeddingModelType.*;
import static dev.langchain4j.model.openai.OpenAiModelName.*;
import static java.time.Duration.*;
@Configuration
public class QuestionAnsweringConfig {
@Value("${openai.apiKey}")
private String openaiApiKey;
@Qualifier("openaiModel")
@Bean
public EmbeddingModel openaiEmbeddingModel() {
return OpenAiEmbeddingModel.builder()
.apiKey(openaiApiKey)
.modelName(TEXT_EMBEDDING_ADA_002)
.build();
}
@Qualifier("inMemoryModel")
@Bean
public EmbeddingModel inMemoryEmbeddingModel() {
return new InProcessEmbeddingModel(ALL_MINILM_L6_V2);
}
@Qualifier("openaiChatModel")
@Bean
public ChatLanguageModel openaiChatModel() {
return OpenAiChatModel.builder()
.apiKey(openaiApiKey)
.modelName(GPT_3_5_TURBO)
.temperature(0.7)
.timeout(ofSeconds(15))
.maxRetries(3)
.logResponses(true)
.logRequests(true)
.build();
}
@Qualifier("inMemoryEmbeddingStore")
@Bean
public EmbeddingStore<TextSegment> inMemoryEmbeddingStore() {
return new InMemoryEmbeddingStore<>();
}
@Qualifier("weaviateEmbeddingStore")
@Bean
public EmbeddingStore<TextSegment> weaviateEmbeddingStore(@Value("${weaviate.apiKey}") String apiKey,
@Value("${weaviate.host}") String host) {
return WeaviateEmbeddingStore.builder()
.apiKey(apiKey)
.scheme("https")
.host(host)
.build();
}
}
| [
"dev.langchain4j.model.openai.OpenAiEmbeddingModel.builder",
"dev.langchain4j.model.openai.OpenAiChatModel.builder",
"dev.langchain4j.store.embedding.weaviate.WeaviateEmbeddingStore.builder"
] | [((1210, 1354), 'dev.langchain4j.model.openai.OpenAiEmbeddingModel.builder'), ((1210, 1329), 'dev.langchain4j.model.openai.OpenAiEmbeddingModel.builder'), ((1210, 1278), 'dev.langchain4j.model.openai.OpenAiEmbeddingModel.builder'), ((1635, 1941), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1635, 1916), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1635, 1881), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1635, 1845), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1635, 1814), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1635, 1774), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1635, 1740), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1635, 1698), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2397, 2547), 'dev.langchain4j.store.embedding.weaviate.WeaviateEmbeddingStore.builder'), ((2397, 2522), 'dev.langchain4j.store.embedding.weaviate.WeaviateEmbeddingStore.builder'), ((2397, 2494), 'dev.langchain4j.store.embedding.weaviate.WeaviateEmbeddingStore.builder'), ((2397, 2461), 'dev.langchain4j.store.embedding.weaviate.WeaviateEmbeddingStore.builder')] |
package com.example.demo;
import java.time.Duration;
import java.time.LocalDate;
import java.util.Arrays;
import java.util.List;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.input.structured.StructuredPrompt;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.output.structured.Description;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.MemoryId;
import dev.langchain4j.service.SystemMessage;
import dev.langchain4j.service.UserMessage;
import dev.langchain4j.service.V;
public class AiServicesExamples {
static Duration duration = Duration.ofSeconds(60);
static ChatLanguageModel model = OpenAiChatModel.builder().apiKey(ApiKeys.OPENAI_API_KEY).timeout(duration).build();
////////////////// SIMPLE EXAMPLE //////////////////////
static class Simple_AI_Service_Example {
interface Assistant {
String chat(String message);
}
public static void main(String[] args) {
Assistant assistant = AiServices.create(Assistant.class, model);
String userMessage = "Translate 'Plus-Values des cessions de valeurs mobilières, de droits sociaux et gains assimilés'";
String answer = assistant.chat(userMessage);
System.out.println(answer);
}
}
////////////////// WITH MESSAGE AND VARIABLES //////////////////////
static class AI_Service_with_System_and_User_Messages_Example {
interface TextUtils {
@SystemMessage("You are a professional translator into {{language}}")
@UserMessage("Translate the following text: {{text}}")
String translate(@V("text") String text, @V("language") String language);
@SystemMessage("Summarize every message from user in {{n}} bullet points. Provide only bullet points.")
List<String> summarize(@UserMessage String text, @V("n") int n);
}
public static void main(String[] args) {
TextUtils utils = AiServices.create(TextUtils.class, model);
String translation = utils.translate("Hello, how are you?", "italian");
System.out.println(translation); // Ciao, come stai?
String text = "AI, or artificial intelligence, is a branch of computer science that aims to create "
+ "machines that mimic human intelligence. This can range from simple tasks such as recognizing "
+ "patterns or speech to more complex tasks like making decisions or predictions.";
List<String> bulletPoints = utils.summarize(text, 3);
System.out.println(bulletPoints);
}
}
////////////////////EXTRACTING DIFFERENT DATA TYPES ////////////////////
static class Sentiment_Extracting_AI_Service_Example {
enum Sentiment {
POSITIVE, NEUTRAL, NEGATIVE;
}
interface SentimentAnalyzer {
@UserMessage("Analyze sentiment of {{it}}")
Sentiment analyzeSentimentOf(String text);
@UserMessage("Does {{it}} have a positive sentiment?")
boolean isPositive(String text);
}
public static void main(String[] args) {
SentimentAnalyzer sentimentAnalyzer = AiServices.create(SentimentAnalyzer.class, model);
Sentiment sentiment = sentimentAnalyzer.analyzeSentimentOf("It is amazing!");
System.out.println(sentiment); // POSITIVE
boolean positive = sentimentAnalyzer.isPositive("It is bad!");
System.out.println(positive); // false
}
}
static class POJO_Extracting_AI_Service_Example {
static class Person {
private String firstName;
private String lastName;
private LocalDate birthDate;
@Override
public String toString() {
return "Person {" + " firstName = \"" + firstName + "\"" + ", lastName = \"" + lastName + "\""
+ ", birthDate = " + birthDate + " }";
}
}
interface PersonExtractor {
@UserMessage("Extract information about a person from {{it}}")
Person extractPersonFrom(String text);
}
public static void main(String[] args) {
PersonExtractor extractor = AiServices.create(PersonExtractor.class, model);
String text = "In 1968, amidst the fading echoes of Independence Day, "
+ "a child named John arrived under the calm evening sky. "
+ "This newborn, bearing the surname Doe, marked the start of a new journey.";
Person person = extractor.extractPersonFrom(text);
System.out.println(person); // Person { firstName = "John", lastName = "Doe", birthDate = 1968-07-04 }
}
}
////////////////////// DESCRIPTIONS ////////////////////////
static class POJO_With_Descriptions_Extracting_AI_Service_Example {
static class Recipe {
@Description("short title, 3 words maximum")
private String title;
@Description("short description, 2 sentences maximum")
private String description;
@Description("each step should be described in 6 to 8 words, steps should rhyme with each other")
private List<String> steps;
private Integer preparationTimeMinutes;
@Override
public String toString() {
return "Recipe {" +
" title = \"" + title + "\"" +
", description = \"" + description + "\"" +
", steps = " + steps +
", preparationTimeMinutes = " + preparationTimeMinutes +
" }";
}
}
@StructuredPrompt("Create a recipe of a {{dish}} that can be prepared using only {{ingredients}}")
static class CreateRecipePrompt {
private String dish;
private List<String> ingredients;
}
interface Chef {
Recipe createRecipeFrom(String... ingredients);
Recipe createRecipe(CreateRecipePrompt prompt);
}
public static void main(String[] args) {
Chef chef = AiServices.create(Chef.class, model);
Recipe recipe = chef.createRecipeFrom("cucumber", "tomato", "feta", "onion", "olives", "lemon");
System.out.println(recipe);
CreateRecipePrompt prompt = new CreateRecipePrompt();
prompt.dish = "oven dish";
prompt.ingredients = Arrays.asList("cucumber", "tomato", "feta", "onion", "olives", "potatoes");
Recipe anotherRecipe = chef.createRecipe(prompt);
System.out.println(anotherRecipe);
}
}
////////////////////////// WITH MEMORY /////////////////////////
static class ServiceWithMemoryExample {
interface Assistant {
String chat(String message);
}
public static void main(String[] args) {
ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10);
Assistant assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(model)
.chatMemory(chatMemory)
.build();
String answer = assistant.chat("Hello! My name is Klaus.");
System.out.println(answer); // Hello Klaus! How can I assist you today?
String answerWithName = assistant.chat("What is my name?");
System.out.println(answerWithName); // Your name is Klaus.
}
}
static class ServiceWithMemoryForEachUserExample {
interface Assistant {
String chat(@MemoryId int memoryId, @UserMessage String userMessage);
}
public static void main(String[] args) {
Assistant assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(model)
.chatMemoryProvider(memoryId -> MessageWindowChatMemory.withMaxMessages(10))
.build();
System.out.println(assistant.chat(1, "Hello, my name is Klaus"));
// Hi Klaus! How can I assist you today?
System.out.println(assistant.chat(2, "Hello, my name is Francine"));
// Hello Francine! How can I assist you today?
System.out.println(assistant.chat(1, "What is my name?"));
// Your name is Klaus.
System.out.println(assistant.chat(2, "What is my name?"));
// Your name is Francine.
}
}
}
| [
"dev.langchain4j.service.AiServices.builder",
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((792, 874), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((792, 866), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((792, 848), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((6740, 6894), 'dev.langchain4j.service.AiServices.builder'), ((6740, 6865), 'dev.langchain4j.service.AiServices.builder'), ((6740, 6821), 'dev.langchain4j.service.AiServices.builder'), ((7478, 7685), 'dev.langchain4j.service.AiServices.builder'), ((7478, 7656), 'dev.langchain4j.service.AiServices.builder'), ((7478, 7559), 'dev.langchain4j.service.AiServices.builder')] |
import dev.langchain4j.agent.tool.Tool;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.service.AiServices;
public class _04_Agents {
static class Calculator {
@Tool("Calculates the length of a string")
int stringLength(String s) {
return s.length();
}
@Tool("Calculates the sum of two numbers")
int add(int a, int b) {
return a + b;
}
}
interface Assistant {
Response<AiMessage> chat(String userMessage);
}
public static void main(String[] args) {
String openAiKey = System.getenv("OPENAI_API_KEY");
var assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(OpenAiChatModel.withApiKey(openAiKey))
.chatMemory(MessageWindowChatMemory.withMaxMessages(10))
.tools(new Calculator())
.build();
var question = "What is the sum of the numbers of letters in the words 'language' and 'model'";
var response = assistant.chat(question);
System.out.println(response.content().text());
System.out.println("\n\n########### TOKEN USAGE ############\n");
System.out.println(response.tokenUsage());
}
}
| [
"dev.langchain4j.service.AiServices.builder"
] | [((821, 1069), 'dev.langchain4j.service.AiServices.builder'), ((821, 1044), 'dev.langchain4j.service.AiServices.builder'), ((821, 1003), 'dev.langchain4j.service.AiServices.builder'), ((821, 930), 'dev.langchain4j.service.AiServices.builder')] |
package me.nzuguem.bot.configurations.llm;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.memory.chat.ChatMemoryProvider;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import jakarta.annotation.PreDestroy;
import jakarta.enterprise.context.RequestScoped;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
@RequestScoped
public class ChatMemoryBean implements ChatMemoryProvider {
private final Map<Object, ChatMemory> memories = new ConcurrentHashMap<>();
@Override
public ChatMemory get(Object memoryId) {
return memories.computeIfAbsent(memoryId, id -> MessageWindowChatMemory.builder()
.maxMessages(20)
.id(memoryId)
.build());
}
@PreDestroy
public void close() {
memories.clear();
}
}
| [
"dev.langchain4j.memory.chat.MessageWindowChatMemory.builder"
] | [((631, 752), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((631, 727), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((631, 697), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder')] |
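The provider above is meant to be picked up by the Quarkus extension, but the same per-id memory pattern can be wired manually through AiServices; a minimal sketch, assuming an OpenAI key in the OPENAI_API_KEY environment variable (the interface and memory ids below are illustrative):

import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.MemoryId;
import dev.langchain4j.service.UserMessage;

public class PerUserMemorySketch {

    interface Assistant {
        String chat(@MemoryId Object memoryId, @UserMessage String message);
    }

    public static void main(String[] args) {
        // A provider equivalent to ChatMemoryBean above, without the CDI scope
        Assistant assistant = AiServices.builder(Assistant.class)
                .chatLanguageModel(OpenAiChatModel.withApiKey(System.getenv("OPENAI_API_KEY")))
                .chatMemoryProvider(memoryId -> MessageWindowChatMemory.builder()
                        .maxMessages(20)
                        .id(memoryId)
                        .build())
                .build();
        System.out.println(assistant.chat("user-1", "Hello, my name is Klaus"));
        System.out.println(assistant.chat("user-1", "What is my name?")); // answered from user-1's memory
    }
}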
package net.savantly.mainbot.config;
import java.time.Duration;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import lombok.RequiredArgsConstructor;
import net.savantly.mainbot.service.replicate.ReplicateClient;
@Configuration
@RequiredArgsConstructor
public class ChatModelConfig {
private final OpenAIConfig openAIConfig;
@Bean
@Primary
@ConditionalOnProperty(prefix = "openai", name = "enabled", havingValue = "true")
public ChatLanguageModel getChatModel(ReplicateClient replicateClient) {
return getOpenAiChatModel();
// return new ReplicateChatLanguageModel(replicateClient);
}
public ChatLanguageModel getOpenAiChatModel() {
String apiKey = openAIConfig.getApiKey();
return OpenAiChatModel.builder()
.apiKey(apiKey) // https://platform.openai.com/account/api-keys
.modelName(openAIConfig.getChatModelId())
.temperature(0.1)
.logResponses(false)
.logRequests(false)
.timeout(Duration.ofSeconds(openAIConfig.getTimeoutSeconds()))
.build();
}
}
| [
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((1056, 1430), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1056, 1405), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1056, 1326), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1056, 1290), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1056, 1253), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1056, 1219), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1056, 1113), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')] |
package io.quarkiverse.langchain4j.workshop.chat;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.memory.chat.ChatMemoryProvider;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import jakarta.enterprise.context.ApplicationScoped;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
@ApplicationScoped
public class ChatMemoryBean implements ChatMemoryProvider {
private final Map<Object, ChatMemory> memories = new ConcurrentHashMap<>();
@Override
public ChatMemory get(Object memoryId) {
return memories.computeIfAbsent(memoryId, id -> MessageWindowChatMemory.builder()
.maxMessages(3)
.id(memoryId)
.build());
}
public void clear(Object session) {
memories.remove(session);
}
}
| [
"dev.langchain4j.memory.chat.MessageWindowChatMemory.builder"
] | [((608, 728), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((608, 703), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((608, 673), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder')] |
package io.quarkiverse.langchain4j.workshop.chat;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.loader.FileSystemDocumentLoader;
import dev.langchain4j.data.document.parser.TextDocumentParser;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import io.quarkiverse.langchain4j.redis.RedisEmbeddingStore;
import io.quarkus.runtime.StartupEvent;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import java.io.File;
import java.util.List;
import static dev.langchain4j.data.document.splitter.DocumentSplitters.recursive;
@ApplicationScoped
public class DocumentIngestor {
/**
* The embedding store (the database).
* The bean is provided by the quarkus-langchain4j-redis extension.
*/
@Inject
RedisEmbeddingStore store;
/**
* The embedding model (how the vector of a document is computed).
* The bean is provided by the LLM (like openai) extension.
*/
@Inject
EmbeddingModel embeddingModel;
public void ingest(@Observes StartupEvent event) {
System.out.printf("Ingesting documents...%n");
List<Document> documents = FileSystemDocumentLoader.loadDocuments(new File("src/main/resources/catalog").toPath(), new TextDocumentParser());
var ingestor = EmbeddingStoreIngestor.builder()
.embeddingStore(store)
.embeddingModel(embeddingModel)
.documentSplitter(recursive(500, 0))
.build();
ingestor.ingest(documents);
System.out.printf("Ingested %d documents.%n", documents.size());
}
}
| [
"dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder"
] | [((1414, 1611), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1414, 1586), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1414, 1533), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1414, 1485), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder')] |
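After ingestion, the segments stored above can be searched directly; a minimal sketch, assuming the pre-1.0 langchain4j API where EmbeddingModel.embed(String) returns a Response<Embedding> and EmbeddingStore.findRelevant(Embedding, int) returns scored matches (the helper name is illustrative):

import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.store.embedding.EmbeddingMatch;
import dev.langchain4j.store.embedding.EmbeddingStore;
import java.util.List;

public class StoreQuerySketch {

    // Hypothetical helper: embeds the question and returns the best-matching segments
    static List<EmbeddingMatch<TextSegment>> query(EmbeddingStore<TextSegment> store,
                                                   EmbeddingModel embeddingModel,
                                                   String question) {
        Embedding queryEmbedding = embeddingModel.embed(question).content();
        return store.findRelevant(queryEmbedding, 3); // top 3 matches with similarity scores
    }
}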
package com.example.demo;
import java.time.Duration;
import dev.langchain4j.chain.ConversationalChain;
import dev.langchain4j.model.openai.OpenAiChatModel;
public class _07_ConversationalChain {
public static void main(String[] args) {
Duration duration = Duration.ofSeconds(60);
OpenAiChatModel model = OpenAiChatModel.builder().apiKey(ApiKeys.OPENAI_API_KEY).timeout(duration).build();
ConversationalChain chain = ConversationalChain.builder().chatLanguageModel(model)
// .chatMemory(...) // you can override default chat memory
.build();
String userMessage1 = "Can you give a brief explanation of the Agile methodology, 3 lines max?";
System.out.println("[User]: " + userMessage1);
String answer1 = chain.execute(userMessage1);
System.out.println("[LLM]: " + answer1);
String userMessage2 = "What are good tools for that? 3 lines max.";
System.out.println("[User]: " + userMessage2);
String answer2 = chain.execute(userMessage2);
System.out.println("[LLM]: " + answer2);
}
}
| [
"dev.langchain4j.model.openai.OpenAiChatModel.builder",
"dev.langchain4j.chain.ConversationalChain.builder"
] | [((313, 395), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((313, 387), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((313, 369), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((428, 559), 'dev.langchain4j.chain.ConversationalChain.builder'), ((428, 482), 'dev.langchain4j.chain.ConversationalChain.builder')] |
package org.mf.langchain.service;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.model.localai.LocalAiChatModel;
import dev.langchain4j.model.localai.LocalAiStreamingChatModel;
import org.jetbrains.annotations.Nullable;
import org.mf.langchain.util.LanguageModel;
import org.mf.langchain.StreamLanguageModel;
import org.springframework.stereotype.Service;
import java.time.Duration;
import java.util.function.Consumer;
@Service
public class LangChainService {
private final LanguageModel lm;
private final StreamLanguageModel slm;
LangChainService() {
lm = new LanguageModel(LocalAiChatModel.builder()
.modelName("phi-2")
.baseUrl("http://localhost:8080")
.build());
slm = new StreamLanguageModel(LocalAiStreamingChatModel.builder()
.modelName("phi-2")
.baseUrl("http://localhost:8080")
.timeout(Duration.ofDays(1))
.temperature(0.8)
.build());
}
public String Generate(String prompt)
{
return lm.RunBlocking(prompt);
}
public void GenerateStream(String prompt, Consumer<String> onNext, Consumer<Throwable> onError, @Nullable Consumer<AiMessage> onComplete) {
slm.generate(prompt, onNext, onError, onComplete);
}
}
| [
"dev.langchain4j.model.localai.LocalAiChatModel.builder",
"dev.langchain4j.model.localai.LocalAiStreamingChatModel.builder"
] | [((623, 760), 'dev.langchain4j.model.localai.LocalAiChatModel.builder'), ((623, 735), 'dev.langchain4j.model.localai.LocalAiChatModel.builder'), ((623, 685), 'dev.langchain4j.model.localai.LocalAiChatModel.builder'), ((802, 1027), 'dev.langchain4j.model.localai.LocalAiStreamingChatModel.builder'), ((802, 1002), 'dev.langchain4j.model.localai.LocalAiStreamingChatModel.builder'), ((802, 968), 'dev.langchain4j.model.localai.LocalAiStreamingChatModel.builder'), ((802, 923), 'dev.langchain4j.model.localai.LocalAiStreamingChatModel.builder'), ((802, 873), 'dev.langchain4j.model.localai.LocalAiStreamingChatModel.builder')] |
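A usage sketch for the streaming path above; the constructor is package-private, so the caller must live in the same package, and the handler lambdas below are illustrative:

package org.mf.langchain.service;

public class StreamUsageSketch {

    public static void main(String[] args) {
        LangChainService service = new LangChainService(); // package-private constructor
        service.GenerateStream(
                "Explain what a language model is in one sentence.",
                token -> System.out.print(token),           // onNext: print tokens as they arrive
                error -> error.printStackTrace(),           // onError
                response -> System.out.println("\n[done]")  // onComplete (may be null)
        );
    }
}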
package io.quarkiverse.langchain4j.samples;
import java.util.function.Supplier;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.memory.chat.ChatMemoryProvider;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.store.memory.chat.InMemoryChatMemoryStore;
public class CustomProvider implements Supplier<ChatMemoryProvider> {
private final InMemoryChatMemoryStore store = new InMemoryChatMemoryStore();
@Override
public ChatMemoryProvider get() {
return new ChatMemoryProvider() {
@Override
public ChatMemory get(Object memoryId) {
return MessageWindowChatMemory.builder()
.maxMessages(20)
.id(memoryId)
.chatMemoryStore(store)
.build();
}
};
}
}
| [
"dev.langchain4j.memory.chat.MessageWindowChatMemory.builder"
] | [((652, 845), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((652, 812), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((652, 764), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((652, 726), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder')] |
package dev.onurb.travelassistant;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.service.AiServices;
import java.io.IOException;
import java.time.Duration;
import java.util.Scanner;
public class TravelAgency {
public static void main(String[] args) throws IOException {
String apiKey = System.getenv("OPENAPI_KEY");
TravelAssistant assistant = AiServices.builder(TravelAssistant.class)
.chatLanguageModel(OpenAiChatModel.builder().apiKey(apiKey).timeout(Duration.ofMinutes(3)).build())
.tools(new TripServices())
.chatMemory(MessageWindowChatMemory.withMaxMessages(10))
.build();
String input = readInput();
while (!"bye".equalsIgnoreCase(input)) {
String answer = assistant.chat(input);
System.out.println("\u001B[33m" + answer + "\u001B[37m");
input = readInput();
}
}
private static String readInput() {
Scanner in = new Scanner(System.in);
System.out.print("> ");
return in.nextLine();
}
}
| [
"dev.langchain4j.service.AiServices.builder",
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((460, 758), 'dev.langchain4j.service.AiServices.builder'), ((460, 733), 'dev.langchain4j.service.AiServices.builder'), ((460, 660), 'dev.langchain4j.service.AiServices.builder'), ((460, 617), 'dev.langchain4j.service.AiServices.builder'), ((537, 616), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((537, 608), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((537, 577), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')] |
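The TravelAssistant interface and TripServices tool class referenced above are not included in this sample; a hypothetical minimal shape that would satisfy the builder call (names and behavior are assumptions, not the original code):

import dev.langchain4j.agent.tool.Tool;

// Hypothetical declarations matching the usage in TravelAgency above
interface TravelAssistant {
    String chat(String userMessage);
}

class TripServices {

    @Tool("Finds available trips for a destination")
    String findTrips(String destination) {
        return "No trips found for " + destination; // placeholder behavior
    }
}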
/*
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gemini.workshop;
import dev.langchain4j.agent.tool.P;
import dev.langchain4j.agent.tool.Tool;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel;
import dev.langchain4j.service.AiServices;
public class Step8b_FunctionCalling {
record WeatherForecast(String location, String forecast, int temperature) {}
static class WeatherForecastService {
@Tool("Get the weather forecast for a location")
WeatherForecast getForecast(@P("Location to get the forecast for") String location) {
if (location.equals("Paris")) {
return new WeatherForecast("Paris", "Sunny", 20);
} else if (location.equals("London")) {
return new WeatherForecast("London", "Rainy", 15);
} else {
return new WeatherForecast("Unknown", "Unknown", 0);
}
}
}
interface WeatherAssistant {
String chat(String userMessage);
}
public static void main(String[] args) {
ChatLanguageModel model = VertexAiGeminiChatModel.builder()
.project(System.getenv("PROJECT_ID"))
.location(System.getenv("LOCATION"))
.modelName("gemini-1.0-pro")
.maxOutputTokens(100)
.build();
WeatherForecastService weatherForecastService = new WeatherForecastService();
WeatherAssistant assistant = AiServices.builder(WeatherAssistant.class)
.chatLanguageModel(model)
.chatMemory(MessageWindowChatMemory.withMaxMessages(10))
.tools(weatherForecastService)
.build();
System.out.println(assistant.chat("What is the weather in Paris?"));
System.out.println(assistant.chat("What is the weather in London?"));
System.out.println(assistant.chat("Is the temperature warmer in Paris or London?"));
}
}
| [
"dev.langchain4j.service.AiServices.builder",
"dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder"
] | [((1743, 1971), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((1743, 1950), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((1743, 1916), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((1743, 1875), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((1743, 1826), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((2098, 2311), 'dev.langchain4j.service.AiServices.builder'), ((2098, 2290), 'dev.langchain4j.service.AiServices.builder'), ((2098, 2247), 'dev.langchain4j.service.AiServices.builder'), ((2098, 2178), 'dev.langchain4j.service.AiServices.builder')] |
package com.hillarocket.application.handler;
import com.vaadin.flow.server.auth.AnonymousAllowed;
import dev.hilla.BrowserCallable;
import dev.langchain4j.memory.chat.TokenWindowChatMemory;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import dev.langchain4j.model.openai.OpenAiTokenizer;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.TokenStream;
import jakarta.annotation.PostConstruct;
import org.springframework.beans.factory.annotation.Value;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Sinks;
@BrowserCallable
@AnonymousAllowed
public class OpenApiHandler {
@Value("${openai.api.key}")
private String OPENAI_API_KEY;
private Assistant assistant;
private StreamingAssistant streamingAssistant;
interface Assistant {
String chat(String message);
}
interface StreamingAssistant {
TokenStream chat(String message);
}
@PostConstruct
public void init() {
if (OPENAI_API_KEY == null) {
System.err.println("ERROR: OPENAI_API_KEY environment variable is not set. Please set it to your OpenAI API key.");
}
var memory = TokenWindowChatMemory.withMaxTokens(2000, new OpenAiTokenizer("gpt-3.5-turbo"));
assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(OpenAiChatModel.withApiKey(OPENAI_API_KEY))
.chatMemory(memory)
.build();
streamingAssistant = AiServices.builder(StreamingAssistant.class)
.streamingChatLanguageModel(OpenAiStreamingChatModel.withApiKey(OPENAI_API_KEY))
.chatMemory(memory)
.build();
}
public String chat(String message) {
return assistant.chat(message);
}
public Flux<String> chatStream(String message) {
Sinks.Many<String> sink = Sinks.many().unicast().onBackpressureBuffer();
streamingAssistant.chat(message)
.onNext(sink::tryEmitNext)
.onComplete(c -> sink.tryEmitComplete())
.onError(sink::tryEmitError)
.start();
return sink.asFlux();
}
} | [
"dev.langchain4j.service.AiServices.builder"
] | [((1336, 1511), 'dev.langchain4j.service.AiServices.builder'), ((1336, 1486), 'dev.langchain4j.service.AiServices.builder'), ((1336, 1450), 'dev.langchain4j.service.AiServices.builder'), ((1543, 1745), 'dev.langchain4j.service.AiServices.builder'), ((1543, 1720), 'dev.langchain4j.service.AiServices.builder'), ((1543, 1684), 'dev.langchain4j.service.AiServices.builder'), ((1929, 1974), 'reactor.core.publisher.Sinks.many'), ((1929, 1951), 'reactor.core.publisher.Sinks.many')] |
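A small sketch of consuming the Flux returned by chatStream(), using Reactor's subscribe(onNext, onError, onComplete) overload; the printing choices are illustrative:

import reactor.core.publisher.Flux;

public class ChatStreamUsageSketch {

    // Prints streamed tokens as they are emitted by the sink above
    static void printStream(Flux<String> tokens) {
        tokens.subscribe(
                System.out::print,                          // each emitted token
                Throwable::printStackTrace,                 // stream error
                () -> System.out.println("\n[complete]"));  // stream completion
    }
}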
package _Engenharia;
import dev.langchain4j.chain.ConversationalRetrievalChain;
import dev.langchain4j.data.document.Document;
//import dev.langchain4j.data.document.splitter.ParagraphSplitter; // THROWS AN ERROR, temporarily replaced
import dev.langchain4j.data.document.splitter.DocumentSplitters; // replacement
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.huggingface.HuggingFaceChatModel;
import dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel;
import dev.langchain4j.retriever.EmbeddingStoreRetriever;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Path;
import java.nio.file.Paths;
import static dev.langchain4j.data.document.FileSystemDocumentLoader.loadDocument;
import static java.time.Duration.ofSeconds;
import java.io.File;
public class Assistente {
// You can get your own HuggingFace API key here: https://huggingface.co/settings/tokens
public static final String hfApiKey = "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";
private static String pergunta;
public String fazerPergunta() throws Exception {
Document document = loadDocument(toPath("template.txt")); //Usa documento criado com todos os dados do documento selecionado (Esse documento e criado dentro do pacote _Engenharia)
//escolhendo um modelo para vetorizar meu texto
EmbeddingModel embeddingModel = HuggingFaceEmbeddingModel.builder()
.accessToken(hfApiKey)
.modelId("sentence-transformers/all-MiniLM-L6-v2")
.waitForModel(true)
.timeout(ofSeconds(60))
.build();
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
        // applying the chosen embedding model to the text
        EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
                // .splitter(new ParagraphSplitter()) // THROWS AN ERROR, temporarily replaced
                .documentSplitter(DocumentSplitters.recursive(500)) // replacement
.embeddingModel(embeddingModel)
.embeddingStore(embeddingStore)
.build();
ingestor.ingest(document);
        // here I choose the model for the inference (the question)
ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder()
.chatLanguageModel(HuggingFaceChatModel.withAccessToken(hfApiKey))
.retriever(EmbeddingStoreRetriever.from(embeddingStore, embeddingModel))
// .chatMemory() // you can override default chat memory
// .promptTemplate() // you can override default prompt template
.build();
        // here I run the inference
        String answer = chain.execute(pergunta);
        File delete_file = new File("src/main/java/_Engenharia/template.txt"); // deletes the file after the answer
        delete_file.delete(); // if the answer fails, the file is NOT deleted
        return answer; // Charlie is a cheerful carrot living in VeggieVille...
        // example for continuing the research
//https://github.com/langchain4j/langchain4j/blob/7307f43d9823af619f1e3196252d212f3df04ddc/langchain4j/src/main/java/dev/langchain4j/model/huggingface/HuggingFaceChatModel.java
}
private static Path toPath(String fileName) {
try {
URL fileUrl = Assistente.class.getResource(fileName);
return Paths.get(fileUrl.toURI());
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
}
public void setPergunta(String p) {
pergunta = p;
}
}
| [
"dev.langchain4j.chain.ConversationalRetrievalChain.builder",
"dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel.builder",
"dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder"
] | [((1706, 1948), 'dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel.builder'), ((1706, 1923), 'dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel.builder'), ((1706, 1883), 'dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel.builder'), ((1706, 1847), 'dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel.builder'), ((1706, 1780), 'dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel.builder'), ((2162, 2524), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2162, 2499), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2162, 2451), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2162, 2385), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2675, 3064), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2675, 2885), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2675, 2796), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder')] |
package com.kchandrakant;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.input.Prompt;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.model.openai.OpenAiChatModel;
import java.util.HashMap;
import java.util.Map;
import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO;
import static java.time.Duration.ofSeconds;
public class PromptTemplates {
public static void main(String[] args) {
// Create a prompt template
PromptTemplate promptTemplate = PromptTemplate.from("Tell me a {{adjective}} joke about {{content}}..");
// Generate prompt using the prompt template and user variables
Map<String, Object> variables = new HashMap<>();
variables.put("adjective", "funny");
variables.put("content", "humans");
Prompt prompt = promptTemplate.apply(variables);
System.out.println(prompt.text());
// Create an instance of a model
ChatLanguageModel model = OpenAiChatModel.builder()
.apiKey(ApiKeys.OPENAI_API_KEY)
.modelName(GPT_3_5_TURBO)
.temperature(0.3)
.build();
// Start interacting
String response = model.generate(prompt.text());
System.out.println(response);
}
} | [
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((1019, 1193), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1019, 1168), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1019, 1134), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1019, 1092), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')] |
package com.azure.migration.java.copilot.service;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.rag.content.retriever.ContentRetriever;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.store.embedding.EmbeddingStore;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
@Configuration
public class Configure {
@Bean
ServiceAnalysisAgent chooseServiceAnalysisAgent(ChatLanguageModel chatLanguageModel) {
return AiServices.builder(ServiceAnalysisAgent.class)
.chatLanguageModel(chatLanguageModel)
.build();
}
@Bean
    ConfigureResourceAgent configureResourceAgent(ChatLanguageModel chatLanguageModel, ContentRetriever contentRetriever) {
return AiServices.builder(ConfigureResourceAgent.class)
.chatLanguageModel(chatLanguageModel)
.contentRetriever(contentRetriever)
.build();
}
@Bean
WorkflowChatAgent configureWorkflowChatAgent(ChatLanguageModel chatLanguageModel, ContentRetriever contentRetriever, MigrationWorkflowTools migrationWorkflowTools) {
return AiServices.builder(WorkflowChatAgent.class)
.chatLanguageModel(chatLanguageModel)
.tools(migrationWorkflowTools)
.chatMemory(MessageWindowChatMemory.withMaxMessages(10))
.build();
}
@Bean
ContentRetriever contentRetriever(EmbeddingStore<TextSegment> embeddingStore, EmbeddingModel embeddingModel) {
// You will need to adjust these parameters to find the optimal setting, which will depend on two main factors:
// - The nature of your data
// - The embedding model you are using
int maxResults = 5;
double minScore = 0.6;
return EmbeddingStoreContentRetriever.builder()
.embeddingStore(embeddingStore)
.embeddingModel(embeddingModel)
.maxResults(maxResults)
.minScore(minScore)
.build();
}
@Bean
EmbeddingModel embeddingModel() {
return new AllMiniLmL6V2EmbeddingModel();
}
}
| [
"dev.langchain4j.service.AiServices.builder",
"dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder"
] | [((846, 971), 'dev.langchain4j.service.AiServices.builder'), ((846, 946), 'dev.langchain4j.service.AiServices.builder'), ((1128, 1307), 'dev.langchain4j.service.AiServices.builder'), ((1128, 1282), 'dev.langchain4j.service.AiServices.builder'), ((1128, 1230), 'dev.langchain4j.service.AiServices.builder'), ((1511, 1753), 'dev.langchain4j.service.AiServices.builder'), ((1511, 1728), 'dev.langchain4j.service.AiServices.builder'), ((1511, 1655), 'dev.langchain4j.service.AiServices.builder'), ((1511, 1608), 'dev.langchain4j.service.AiServices.builder'), ((2167, 2404), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2167, 2379), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2167, 2343), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2167, 2303), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2167, 2255), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder')] |
package com.example.application.services;
import com.vaadin.flow.server.auth.AnonymousAllowed;
import dev.hilla.BrowserCallable;
import dev.langchain4j.memory.chat.TokenWindowChatMemory;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import dev.langchain4j.model.openai.OpenAiTokenizer;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.TokenStream;
import jakarta.annotation.PostConstruct;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Sinks;
@Service
@BrowserCallable
@AnonymousAllowed
public class ChatService {
@Value("${openai.api.key}")
private String OPENAI_API_KEY;
private Assistant assistant;
private StreamingAssistant streamingAssistant;
interface Assistant {
String chat(String message);
}
interface StreamingAssistant {
TokenStream chat(String message);
}
@PostConstruct
public void init() {
var memory = TokenWindowChatMemory.withMaxTokens(2000, new OpenAiTokenizer("gpt-3.5-turbo"));
assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(OpenAiChatModel.withApiKey(OPENAI_API_KEY))
.chatMemory(memory)
.build();
streamingAssistant = AiServices.builder(StreamingAssistant.class)
.streamingChatLanguageModel(OpenAiStreamingChatModel.withApiKey(OPENAI_API_KEY))
.chatMemory(memory)
.build();
}
public String chat(String message) {
return assistant.chat(message);
}
public Flux<String> chatStream(String message) {
Sinks.Many<String> sink = Sinks.many().unicast().onBackpressureBuffer();
streamingAssistant.chat(message)
.onNext(sink::tryEmitNext)
.onComplete(sink::tryEmitComplete)
.onError(sink::tryEmitError)
.start();
return sink.asFlux();
}
}
| [
"dev.langchain4j.service.AiServices.builder"
] | [((1208, 1383), 'dev.langchain4j.service.AiServices.builder'), ((1208, 1358), 'dev.langchain4j.service.AiServices.builder'), ((1208, 1322), 'dev.langchain4j.service.AiServices.builder'), ((1415, 1617), 'dev.langchain4j.service.AiServices.builder'), ((1415, 1592), 'dev.langchain4j.service.AiServices.builder'), ((1415, 1556), 'dev.langchain4j.service.AiServices.builder'), ((1801, 1846), 'reactor.core.publisher.Sinks.many'), ((1801, 1823), 'reactor.core.publisher.Sinks.many')] |
package org.acme;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import io.quarkus.logging.Log;
import io.quarkus.runtime.Startup;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import jakarta.json.Json;
import jakarta.json.JsonArray;
import jakarta.json.JsonReader;
import jakarta.json.JsonValue;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.util.ArrayList;
import java.util.List;
import static dev.langchain4j.data.document.splitter.DocumentSplitters.recursive;
@ApplicationScoped
public class IngestData {
@Inject
EmbeddingStore<TextSegment> store;
@Inject
EmbeddingModel embeddingModel;
@Inject
@ConfigProperty(name = "data.file")
File dataFile;
@Inject
@ConfigProperty(name = "max.entries", defaultValue = "99999")
Integer maxEntries;
@Startup
public void init() {
List<Document> documents = new ArrayList<>();
try(JsonReader reader = Json.createReader(new FileReader(dataFile))) {
JsonArray results = reader.readArray();
Log.info("Ingesting news reports...");
int i = 0;
for (JsonValue newsEntry : results) {
i++;
if(i > maxEntries) {
break;
}
String content = newsEntry.asJsonObject().getString("content", null);
if(content != null && !content.isEmpty()) {
Document doc = new Document(content);
documents.add(doc);
continue;
}
String fullDescription = newsEntry.asJsonObject().getString("full_description", null);
if(fullDescription != null && !fullDescription.isEmpty()) {
Document doc = new Document(fullDescription);
documents.add(doc);
continue;
}
String description = newsEntry.asJsonObject().getString("description", null);
if(description != null && !description.isEmpty()) {
Document doc = new Document(description);
documents.add(doc);
continue;
}
}
var ingestor = EmbeddingStoreIngestor.builder()
.embeddingStore(store)
.embeddingModel(embeddingModel)
.documentSplitter(recursive(1000, 50))
.build();
ingestor.ingest(documents);
Log.infof("Ingested %d news articles.", documents.size());
} catch (FileNotFoundException e) {
throw new RuntimeException(e);
}
}
}
| [
"dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder"
] | [((2590, 2805), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2590, 2776), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2590, 2717), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2590, 2665), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder')] |
package com.sivalabs.demo.langchain4j;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.ollama.OllamaChatModel;
public class OllamaChatDemo {
public static void main(String[] args) {
ChatLanguageModel model = OllamaChatModel.builder()
.baseUrl("http://localhost:11434")
.modelName("llama2")
.build();
String answer = model.generate("List all the movies directed by Quentin Tarantino");
System.out.println(answer);
}
}
| [
"dev.langchain4j.model.ollama.OllamaChatModel.builder"
] | [((257, 395), 'dev.langchain4j.model.ollama.OllamaChatModel.builder'), ((257, 370), 'dev.langchain4j.model.ollama.OllamaChatModel.builder'), ((257, 333), 'dev.langchain4j.model.ollama.OllamaChatModel.builder')] |
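A streaming variant of the demo above; a sketch assuming the same langchain4j version also ships dev.langchain4j.model.ollama.OllamaStreamingChatModel, whose generate(String, StreamingResponseHandler) delivers tokens incrementally:

import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.model.StreamingResponseHandler;
import dev.langchain4j.model.ollama.OllamaStreamingChatModel;
import dev.langchain4j.model.output.Response;

public class OllamaStreamingChatDemo {

    public static void main(String[] args) {
        OllamaStreamingChatModel model = OllamaStreamingChatModel.builder()
                .baseUrl("http://localhost:11434")
                .modelName("llama2")
                .build();
        model.generate("List all the movies directed by Quentin Tarantino", new StreamingResponseHandler<AiMessage>() {
            @Override
            public void onNext(String token) {
                System.out.print(token); // print each token as it streams in
            }

            @Override
            public void onComplete(Response<AiMessage> response) {
                System.out.println(); // full AiMessage is available in response.content()
            }

            @Override
            public void onError(Throwable error) {
                error.printStackTrace();
            }
        });
    }
}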
package com.ramesh.langchain;
import java.util.Scanner;
import dev.langchain4j.agent.tool.Tool;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.service.AiServices;
/***
 * This project demonstrates the use of LangChain services that use custom tools to generate the final output
*/
public class ServiceWithToolsLive {
// Open AI Key and Chat GPT Model to use
public static String OPENAI_API_KEY = "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";
public static String OPENAI_MODEL = "gpt-3.5-turbo";
public static void main(String[] args) {
System.out.println("Using a custom Calculator as LangChain \"tool\"");
// Building a Custom LangChain Assistant using LangChain AiServices
System.out.println("Building a Custom Assistant using LangChain AiServices");
Assistant assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(OpenAiChatModel.withApiKey(OPENAI_API_KEY)).tools(new Calculator())
.chatMemory(MessageWindowChatMemory.withMaxMessages(10)).build();
while (true) {
            // get 2 words for which the total character count is calculated
Scanner scanner = new Scanner(System.in);
System.out.print("Enter Word 1:");
String word1 = scanner.nextLine();
System.out.print("Enter Word 2:");
String word2 = scanner.nextLine();
String question = "What is the sum of the numbers of letters in the words \"" + word1 + "\" and \"" + word2 + "\"?";
System.out.println("Prompting ChatGPT :" + question);
            // when a prompt containing the 2 words is sent to the LLM via the LangChain assistant,
            // the Calculator tools are called to compute the final answer
System.out.println("Invoking Custom Assistant Class chat() and getting response from ChatGPT...");
String answer = assistant.chat(question);
System.out.println("ChatGPT Response...\n");
System.out.println(answer);
}
}
// a custom tool
static class Calculator {
@Tool("Calculates the length of a string")
int stringLength(String s) {
return s.length();
}
@Tool("Calculates the sum of two numbers")
int add(int a, int b) {
return a + b;
}
}
interface Assistant {
String chat(String userMessage);
}
}
| [
"dev.langchain4j.service.AiServices.builder"
] | [((896, 1091), 'dev.langchain4j.service.AiServices.builder'), ((896, 1083), 'dev.langchain4j.service.AiServices.builder'), ((896, 1022), 'dev.langchain4j.service.AiServices.builder'), ((896, 998), 'dev.langchain4j.service.AiServices.builder')] |
package ${{ values.basePackage }};
import java.io.IOException;
import java.nio.file.Path;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentParser;
import dev.langchain4j.data.document.loader.FileSystemDocumentLoader;
import dev.langchain4j.data.document.parser.TextDocumentParser;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.rag.content.retriever.ContentRetriever;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.Bean;
import org.springframework.util.ResourceUtils;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RestController;
@SpringBootApplication
public class DemoApplication {
public static void main(String[] args) {
SpringApplication.run(DemoApplication.class, args);
}
@Bean
ChatAgent chatAgent(ChatLanguageModel chatLanguageModel) {
ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10);
return AiServices.builder(ChatAgent.class)
.chatLanguageModel(chatLanguageModel)
.chatMemory(chatMemory)
.build();
}
@Bean
DocumentAgent documentAgent(ChatLanguageModel chatLanguageModel, EmbeddingModel embeddingModel, EmbeddingStore<TextSegment> embeddingStore) throws IOException {
Path documentPath = ResourceUtils.getFile("classpath:documents/story.md").toPath();
DocumentParser documentParser = new TextDocumentParser();
Document document = FileSystemDocumentLoader.loadDocument(documentPath, documentParser);
EmbeddingStoreIngestor dataIngestor = EmbeddingStoreIngestor.builder()
.embeddingStore(embeddingStore)
.embeddingModel(embeddingModel)
.documentSplitter(DocumentSplitters.recursive(300, 10))
.build();
dataIngestor.ingest(document);
ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder()
.embeddingStore(embeddingStore)
.embeddingModel(embeddingModel)
.maxResults(3)
.minScore(0.5)
.build();
ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10);
return AiServices.builder(DocumentAgent.class)
.chatLanguageModel(chatLanguageModel)
.contentRetriever(contentRetriever)
.chatMemory(chatMemory)
.build();
}
}
@RestController
class ChatController {
private final ChatAgent chatAgent;
ChatController(ChatAgent chatAgent) {
this.chatAgent = chatAgent;
}
@PostMapping("/chat")
String chat(@RequestBody String prompt) {
return chatAgent.answer(prompt);
}
}
@RestController
class DocumentController {
private final DocumentAgent documentAgent;
DocumentController(DocumentAgent documentAgent) {
this.documentAgent = documentAgent;
}
@PostMapping("/chat/doc")
String chat(@RequestBody String prompt) {
return documentAgent.answer(prompt);
}
}
interface ChatAgent {
String answer(String prompt);
}
interface DocumentAgent {
String answer(String prompt);
}
| [
"dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder",
"dev.langchain4j.service.AiServices.builder",
"dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder"
] | [((1657, 1775), 'dev.langchain4j.service.AiServices.builder'), ((1657, 1762), 'dev.langchain4j.service.AiServices.builder'), ((1657, 1734), 'dev.langchain4j.service.AiServices.builder'), ((1972, 2034), 'org.springframework.util.ResourceUtils.getFile'), ((2228, 2405), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2228, 2392), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2228, 2332), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2228, 2296), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2479, 2642), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2479, 2629), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2479, 2610), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2479, 2591), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2479, 2555), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2727, 2889), 'dev.langchain4j.service.AiServices.builder'), ((2727, 2876), 'dev.langchain4j.service.AiServices.builder'), ((2727, 2848), 'dev.langchain4j.service.AiServices.builder'), ((2727, 2808), 'dev.langchain4j.service.AiServices.builder')] |
package com.docuverse.backend.configuration;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.openai.OpenAiEmbeddingModel;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import io.github.cdimascio.dotenv.Dotenv;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import static dev.langchain4j.model.openai.OpenAiModelName.TEXT_EMBEDDING_ADA_002;
import static java.time.Duration.ofSeconds;
@Configuration
public class EmbeddingModelConfiguration {
Dotenv dotenv = Dotenv.load();
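    // Dotenv.load() reads key/value pairs from a local .env file, keeping the OpenAI key out of the source tree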
@Bean
public EmbeddingModel embeddingModel() {
return OpenAiEmbeddingModel.builder()
.apiKey(dotenv.get("OPENAI_API_KEY"))
.modelName(TEXT_EMBEDDING_ADA_002)
.timeout(ofSeconds(15))
.logRequests(false)
.logResponses(false)
.build();
}
}
| [
"dev.langchain4j.model.openai.OpenAiEmbeddingModel.builder"
] | [((784, 1057), 'dev.langchain4j.model.openai.OpenAiEmbeddingModel.builder'), ((784, 1032), 'dev.langchain4j.model.openai.OpenAiEmbeddingModel.builder'), ((784, 995), 'dev.langchain4j.model.openai.OpenAiEmbeddingModel.builder'), ((784, 959), 'dev.langchain4j.model.openai.OpenAiEmbeddingModel.builder'), ((784, 919), 'dev.langchain4j.model.openai.OpenAiEmbeddingModel.builder'), ((784, 868), 'dev.langchain4j.model.openai.OpenAiEmbeddingModel.builder')] |
package io.quarkiverse.langchain4j.openai.runtime;
import static io.quarkiverse.langchain4j.runtime.OptionalUtil.firstOrDefault;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Optional;
import java.util.function.Supplier;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.chat.DisabledChatLanguageModel;
import dev.langchain4j.model.chat.DisabledStreamingChatLanguageModel;
import dev.langchain4j.model.chat.StreamingChatLanguageModel;
import dev.langchain4j.model.embedding.DisabledEmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.image.DisabledImageModel;
import dev.langchain4j.model.image.ImageModel;
import dev.langchain4j.model.moderation.DisabledModerationModel;
import dev.langchain4j.model.moderation.ModerationModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiEmbeddingModel;
import dev.langchain4j.model.openai.OpenAiModerationModel;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import io.quarkiverse.langchain4j.openai.QuarkusOpenAiClient;
import io.quarkiverse.langchain4j.openai.QuarkusOpenAiImageModel;
import io.quarkiverse.langchain4j.openai.runtime.config.ChatModelConfig;
import io.quarkiverse.langchain4j.openai.runtime.config.EmbeddingModelConfig;
import io.quarkiverse.langchain4j.openai.runtime.config.ImageModelConfig;
import io.quarkiverse.langchain4j.openai.runtime.config.LangChain4jOpenAiConfig;
import io.quarkiverse.langchain4j.openai.runtime.config.ModerationModelConfig;
import io.quarkiverse.langchain4j.runtime.NamedModelUtil;
import io.quarkus.runtime.ShutdownContext;
import io.quarkus.runtime.annotations.Recorder;
import io.smallrye.config.ConfigValidationException;
@Recorder
public class OpenAiRecorder {
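    // "dummy" is the placeholder default API key; if it is still configured at runtime, a validation error is raised below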
private static final String DUMMY_KEY = "dummy";
public Supplier<ChatLanguageModel> chatModel(LangChain4jOpenAiConfig runtimeConfig, String modelName) {
LangChain4jOpenAiConfig.OpenAiConfig openAiConfig = correspondingOpenAiConfig(runtimeConfig, modelName);
if (openAiConfig.enableIntegration()) {
String apiKey = openAiConfig.apiKey();
if (DUMMY_KEY.equals(apiKey)) {
throw new ConfigValidationException(createApiKeyConfigProblems(modelName));
}
ChatModelConfig chatModelConfig = openAiConfig.chatModel();
var builder = OpenAiChatModel.builder()
.baseUrl(openAiConfig.baseUrl())
.apiKey(apiKey)
.timeout(openAiConfig.timeout())
.maxRetries(openAiConfig.maxRetries())
.logRequests(firstOrDefault(false, chatModelConfig.logRequests(), openAiConfig.logRequests()))
.logResponses(firstOrDefault(false, chatModelConfig.logResponses(), openAiConfig.logResponses()))
.modelName(chatModelConfig.modelName())
.temperature(chatModelConfig.temperature())
.topP(chatModelConfig.topP())
.presencePenalty(chatModelConfig.presencePenalty())
.frequencyPenalty(chatModelConfig.frequencyPenalty())
.responseFormat(chatModelConfig.responseFormat().orElse(null));
openAiConfig.organizationId().ifPresent(builder::organizationId);
if (chatModelConfig.maxTokens().isPresent()) {
builder.maxTokens(chatModelConfig.maxTokens().get());
}
return new Supplier<>() {
@Override
public ChatLanguageModel get() {
return builder.build();
}
};
} else {
return new Supplier<>() {
@Override
public ChatLanguageModel get() {
return new DisabledChatLanguageModel();
}
};
}
}
public Supplier<StreamingChatLanguageModel> streamingChatModel(LangChain4jOpenAiConfig runtimeConfig, String modelName) {
LangChain4jOpenAiConfig.OpenAiConfig openAiConfig = correspondingOpenAiConfig(runtimeConfig, modelName);
if (openAiConfig.enableIntegration()) {
String apiKey = openAiConfig.apiKey();
if (DUMMY_KEY.equals(apiKey)) {
throw new ConfigValidationException(createApiKeyConfigProblems(modelName));
}
ChatModelConfig chatModelConfig = openAiConfig.chatModel();
var builder = OpenAiStreamingChatModel.builder()
.baseUrl(openAiConfig.baseUrl())
.apiKey(apiKey)
.timeout(openAiConfig.timeout())
.logRequests(firstOrDefault(false, chatModelConfig.logRequests(), openAiConfig.logRequests()))
.logResponses(firstOrDefault(false, chatModelConfig.logResponses(), openAiConfig.logResponses()))
.modelName(chatModelConfig.modelName())
.temperature(chatModelConfig.temperature())
.topP(chatModelConfig.topP())
.presencePenalty(chatModelConfig.presencePenalty())
.frequencyPenalty(chatModelConfig.frequencyPenalty())
.responseFormat(chatModelConfig.responseFormat().orElse(null));
openAiConfig.organizationId().ifPresent(builder::organizationId);
if (chatModelConfig.maxTokens().isPresent()) {
builder.maxTokens(chatModelConfig.maxTokens().get());
}
return new Supplier<>() {
@Override
public StreamingChatLanguageModel get() {
return builder.build();
}
};
} else {
return new Supplier<>() {
@Override
public StreamingChatLanguageModel get() {
return new DisabledStreamingChatLanguageModel();
}
};
}
}
public Supplier<EmbeddingModel> embeddingModel(LangChain4jOpenAiConfig runtimeConfig, String modelName) {
LangChain4jOpenAiConfig.OpenAiConfig openAiConfig = correspondingOpenAiConfig(runtimeConfig, modelName);
if (openAiConfig.enableIntegration()) {
String apiKeyOpt = openAiConfig.apiKey();
if (DUMMY_KEY.equals(apiKeyOpt)) {
throw new ConfigValidationException(createApiKeyConfigProblems(modelName));
}
EmbeddingModelConfig embeddingModelConfig = openAiConfig.embeddingModel();
var builder = OpenAiEmbeddingModel.builder()
.baseUrl(openAiConfig.baseUrl())
.apiKey(apiKeyOpt)
.timeout(openAiConfig.timeout())
.maxRetries(openAiConfig.maxRetries())
.logRequests(firstOrDefault(false, embeddingModelConfig.logRequests(), openAiConfig.logRequests()))
.logResponses(firstOrDefault(false, embeddingModelConfig.logResponses(), openAiConfig.logResponses()))
.modelName(embeddingModelConfig.modelName());
if (embeddingModelConfig.user().isPresent()) {
builder.user(embeddingModelConfig.user().get());
}
openAiConfig.organizationId().ifPresent(builder::organizationId);
return new Supplier<>() {
@Override
public EmbeddingModel get() {
return builder.build();
}
};
} else {
return new Supplier<>() {
@Override
public EmbeddingModel get() {
return new DisabledEmbeddingModel();
}
};
}
}
public Supplier<ModerationModel> moderationModel(LangChain4jOpenAiConfig runtimeConfig, String modelName) {
LangChain4jOpenAiConfig.OpenAiConfig openAiConfig = correspondingOpenAiConfig(runtimeConfig, modelName);
if (openAiConfig.enableIntegration()) {
String apiKey = openAiConfig.apiKey();
if (DUMMY_KEY.equals(apiKey)) {
throw new ConfigValidationException(createApiKeyConfigProblems(modelName));
}
ModerationModelConfig moderationModelConfig = openAiConfig.moderationModel();
var builder = OpenAiModerationModel.builder()
.baseUrl(openAiConfig.baseUrl())
.apiKey(apiKey)
.timeout(openAiConfig.timeout())
.maxRetries(openAiConfig.maxRetries())
.logRequests(firstOrDefault(false, moderationModelConfig.logRequests(), openAiConfig.logRequests()))
.logResponses(firstOrDefault(false, moderationModelConfig.logResponses(), openAiConfig.logResponses()))
.modelName(moderationModelConfig.modelName());
openAiConfig.organizationId().ifPresent(builder::organizationId);
return new Supplier<>() {
@Override
public ModerationModel get() {
return builder.build();
}
};
} else {
return new Supplier<>() {
@Override
public ModerationModel get() {
return new DisabledModerationModel();
}
};
}
}
public Supplier<ImageModel> imageModel(LangChain4jOpenAiConfig runtimeConfig, String modelName) {
LangChain4jOpenAiConfig.OpenAiConfig openAiConfig = correspondingOpenAiConfig(runtimeConfig, modelName);
if (openAiConfig.enableIntegration()) {
String apiKey = openAiConfig.apiKey();
if (DUMMY_KEY.equals(apiKey)) {
throw new ConfigValidationException(createApiKeyConfigProblems(modelName));
}
ImageModelConfig imageModelConfig = openAiConfig.imageModel();
var builder = QuarkusOpenAiImageModel.builder()
.baseUrl(openAiConfig.baseUrl())
.apiKey(apiKey)
.timeout(openAiConfig.timeout())
.maxRetries(openAiConfig.maxRetries())
.logRequests(firstOrDefault(false, imageModelConfig.logRequests(), openAiConfig.logRequests()))
.logResponses(firstOrDefault(false, imageModelConfig.logResponses(), openAiConfig.logResponses()))
.modelName(imageModelConfig.modelName())
.size(imageModelConfig.size())
.quality(imageModelConfig.quality())
.style(imageModelConfig.style())
.responseFormat(imageModelConfig.responseFormat())
.user(imageModelConfig.user());
openAiConfig.organizationId().ifPresent(builder::organizationId);
// we persist if the directory was set explicitly and the boolean flag was not set to false
// or if the boolean flag was set explicitly to true
Optional<Path> persistDirectory = Optional.empty();
if (imageModelConfig.persist().isPresent()) {
if (imageModelConfig.persist().get()) {
persistDirectory = imageModelConfig.persistDirectory().or(new Supplier<>() {
@Override
public Optional<? extends Path> get() {
return Optional.of(Paths.get(System.getProperty("java.io.tmpdir"), "dall-e-images"));
}
});
}
} else {
if (imageModelConfig.persistDirectory().isPresent()) {
persistDirectory = imageModelConfig.persistDirectory();
}
}
builder.persistDirectory(persistDirectory);
return new Supplier<>() {
@Override
public ImageModel get() {
return builder.build();
}
};
} else {
return new Supplier<>() {
@Override
public ImageModel get() {
return new DisabledImageModel();
}
};
}
}
private LangChain4jOpenAiConfig.OpenAiConfig correspondingOpenAiConfig(LangChain4jOpenAiConfig runtimeConfig,
String modelName) {
LangChain4jOpenAiConfig.OpenAiConfig openAiConfig;
if (NamedModelUtil.isDefault(modelName)) {
openAiConfig = runtimeConfig.defaultConfig();
} else {
openAiConfig = runtimeConfig.namedConfig().get(modelName);
}
return openAiConfig;
}
private ConfigValidationException.Problem[] createApiKeyConfigProblems(String modelName) {
return createConfigProblems("api-key", modelName);
}
private ConfigValidationException.Problem[] createConfigProblems(String key, String modelName) {
return new ConfigValidationException.Problem[] { createConfigProblem(key, modelName) };
}
private ConfigValidationException.Problem createConfigProblem(String key, String modelName) {
return new ConfigValidationException.Problem(String.format(
"SRCFG00014: The config property quarkus.langchain4j.openai%s%s is required but it could not be found in any config source",
NamedModelUtil.isDefault(modelName) ? "." : ("." + modelName + "."), key));
}
public void cleanUp(ShutdownContext shutdown) {
shutdown.addShutdownTask(new Runnable() {
@Override
public void run() {
QuarkusOpenAiClient.clearCache();
}
});
}
}
| [
"dev.langchain4j.model.openai.OpenAiEmbeddingModel.builder",
"dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder",
"dev.langchain4j.model.openai.OpenAiModerationModel.builder",
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((2450, 3312), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2450, 3229), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2450, 3155), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2450, 3083), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2450, 3033), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2450, 2969), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2450, 2909), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2450, 2791), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2450, 2676), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2450, 2617), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2450, 2564), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2450, 2528), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((4555, 5367), 'dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder'), ((4555, 5284), 'dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder'), ((4555, 5210), 'dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder'), ((4555, 5138), 'dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder'), ((4555, 5088), 'dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder'), ((4555, 5024), 'dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder'), ((4555, 4964), 'dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder'), ((4555, 4846), 'dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder'), ((4555, 4731), 'dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder'), ((4555, 4678), 'dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder'), ((4555, 4642), 'dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder'), ((6642, 7184), 'dev.langchain4j.model.openai.OpenAiEmbeddingModel.builder'), ((6642, 7119), 'dev.langchain4j.model.openai.OpenAiEmbeddingModel.builder'), ((6642, 6996), 'dev.langchain4j.model.openai.OpenAiEmbeddingModel.builder'), ((6642, 6876), 'dev.langchain4j.model.openai.OpenAiEmbeddingModel.builder'), ((6642, 6817), 'dev.langchain4j.model.openai.OpenAiEmbeddingModel.builder'), ((6642, 6764), 'dev.langchain4j.model.openai.OpenAiEmbeddingModel.builder'), ((6642, 6725), 'dev.langchain4j.model.openai.OpenAiEmbeddingModel.builder'), ((8417, 8960), 'dev.langchain4j.model.openai.OpenAiModerationModel.builder'), ((8417, 8894), 'dev.langchain4j.model.openai.OpenAiModerationModel.builder'), ((8417, 8770), 'dev.langchain4j.model.openai.OpenAiModerationModel.builder'), ((8417, 8649), 'dev.langchain4j.model.openai.OpenAiModerationModel.builder'), ((8417, 8590), 'dev.langchain4j.model.openai.OpenAiModerationModel.builder'), ((8417, 8537), 'dev.langchain4j.model.openai.OpenAiModerationModel.builder'), ((8417, 8501), 'dev.langchain4j.model.openai.OpenAiModerationModel.builder'), ((10032, 10845), 'io.quarkiverse.langchain4j.openai.QuarkusOpenAiImageModel.builder'), ((10032, 10794), 'io.quarkiverse.langchain4j.openai.QuarkusOpenAiImageModel.builder'), ((10032, 10723), 'io.quarkiverse.langchain4j.openai.QuarkusOpenAiImageModel.builder'), ((10032, 10670), 'io.quarkiverse.langchain4j.openai.QuarkusOpenAiImageModel.builder'), ((10032, 10613), 'io.quarkiverse.langchain4j.openai.QuarkusOpenAiImageModel.builder'), ((10032, 10562), 'io.quarkiverse.langchain4j.openai.QuarkusOpenAiImageModel.builder'), ((10032, 10501), 'io.quarkiverse.langchain4j.openai.QuarkusOpenAiImageModel.builder'), ((10032, 10382), 'io.quarkiverse.langchain4j.openai.QuarkusOpenAiImageModel.builder'), ((10032, 10266), 'io.quarkiverse.langchain4j.openai.QuarkusOpenAiImageModel.builder'), ((10032, 10207), 'io.quarkiverse.langchain4j.openai.QuarkusOpenAiImageModel.builder'), ((10032, 10154), 'io.quarkiverse.langchain4j.openai.QuarkusOpenAiImageModel.builder'), ((10032, 10118), 'io.quarkiverse.langchain4j.openai.QuarkusOpenAiImageModel.builder')]
package io.quarkiverse.langchain4j.sample;
import java.util.function.Supplier;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
public class MyChatModelSupplier implements Supplier<ChatLanguageModel> {
@Override
public ChatLanguageModel get() {
return OpenAiChatModel.builder()
.apiKey("...")
.build();
}
}
| [
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((328, 409), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((328, 384), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')] |
package com.tencent.supersonic.headless.core.chat.parser.llm;
import com.tencent.supersonic.common.util.JsonUtil;
import com.tencent.supersonic.headless.core.config.OptimizationConfig;
import com.tencent.supersonic.headless.core.chat.query.llm.s2sql.LLMReq;
import com.tencent.supersonic.headless.core.chat.query.llm.s2sql.LLMReq.SqlGenerationMode;
import com.tencent.supersonic.headless.core.chat.query.llm.s2sql.LLMResp;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.input.Prompt;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.model.output.Response;
import org.apache.commons.lang3.tuple.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;
@Service
public class TwoPassSCSqlGeneration implements SqlGeneration, InitializingBean {
private static final Logger keyPipelineLog = LoggerFactory.getLogger("keyPipeline");
@Autowired
private ChatLanguageModel chatLanguageModel;
@Autowired
private SqlExamplarLoader sqlExamplarLoader;
@Autowired
private OptimizationConfig optimizationConfig;
@Autowired
private SqlPromptGenerator sqlPromptGenerator;
@Override
public LLMResp generation(LLMReq llmReq, Long dataSetId) {
        // 1. retrieve SQL examples and build the example list pool
keyPipelineLog.info("dataSetId:{},llmReq:{}", dataSetId, llmReq);
List<Map<String, String>> sqlExamples = sqlExamplarLoader.retrieverSqlExamples(llmReq.getQueryText(),
optimizationConfig.getText2sqlExampleNum());
List<List<Map<String, String>>> exampleListPool = sqlPromptGenerator.getExampleCombos(sqlExamples,
optimizationConfig.getText2sqlFewShotsNum(), optimizationConfig.getText2sqlSelfConsistencyNum());
        // 2. generate linking prompts and collect model responses in parallel
List<String> linkingPromptPool = sqlPromptGenerator.generatePromptPool(llmReq, exampleListPool, false);
List<String> linkingResults = new CopyOnWriteArrayList<>();
linkingPromptPool.parallelStream().forEach(
linkingPrompt -> {
Prompt prompt = PromptTemplate.from(JsonUtil.toString(linkingPrompt)).apply(new HashMap<>());
keyPipelineLog.info("step one request prompt:{}", prompt.toSystemMessage());
Response<AiMessage> linkingResult = chatLanguageModel.generate(prompt.toSystemMessage());
String result = linkingResult.content().text();
keyPipelineLog.info("step one model response:{}", result);
linkingResults.add(OutputFormat.getSchemaLink(result));
}
);
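        // self-consistency: normalize the parallel linking results and vote for the most frequent schema link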
List<String> sortedList = OutputFormat.formatList(linkingResults);
Pair<String, Map<String, Double>> linkingMap = OutputFormat.selfConsistencyVote(sortedList);
        // 3. generate SQL prompts and collect model responses in parallel
List<String> sqlPromptPool = sqlPromptGenerator.generateSqlPromptPool(llmReq, sortedList, exampleListPool);
List<String> sqlTaskPool = new CopyOnWriteArrayList<>();
sqlPromptPool.parallelStream().forEach(sqlPrompt -> {
Prompt linkingPrompt = PromptTemplate.from(JsonUtil.toString(sqlPrompt)).apply(new HashMap<>());
keyPipelineLog.info("step two request prompt:{}", linkingPrompt.toSystemMessage());
Response<AiMessage> sqlResult = chatLanguageModel.generate(linkingPrompt.toSystemMessage());
String result = sqlResult.content().text();
keyPipelineLog.info("step two model response:{}", result);
sqlTaskPool.add(result);
});
        // 4. format the response
Pair<String, Map<String, Double>> sqlMapPair = OutputFormat.selfConsistencyVote(sqlTaskPool);
keyPipelineLog.info("linkingMap:{} sqlMap:{}", linkingMap, sqlMapPair.getRight());
LLMResp llmResp = new LLMResp();
llmResp.setQuery(llmReq.getQueryText());
llmResp.setSqlRespMap(OutputFormat.buildSqlRespMap(sqlExamples, sqlMapPair.getRight()));
return llmResp;
}
@Override
public void afterPropertiesSet() {
SqlGenerationFactory.addSqlGenerationForFactory(SqlGenerationMode.TWO_PASS_AUTO_COT_SELF_CONSISTENCY, this);
}
}
| [
"dev.langchain4j.model.input.PromptTemplate.from"
] | [((2481, 2557), 'dev.langchain4j.model.input.PromptTemplate.from'), ((3537, 3609), 'dev.langchain4j.model.input.PromptTemplate.from')] |
package org.example;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.memory.chat.ChatMemoryProvider;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.store.memory.chat.ChatMemoryStore;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.List;
public class _09_AIServices_06_ChatMemoryPersisted {
public static void main(String[] args) {
OpenAiChatModel model = OpenAiChatModel.withApiKey(ApiKeys.OPENAI_DEMO);
FileStore store = new FileStore();
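        // the provider gives each memoryId its own 10-message window, persisted through the file-backed store below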
ChatMemoryProvider provider = memoryId -> MessageWindowChatMemory.builder()
.id(memoryId)
.maxMessages(10)
.chatMemoryStore(store)
.build();
ChatAssistant assistant = AiServices.builder(ChatAssistant.class)
.chatLanguageModel(model)
.chatMemoryProvider(provider)
.build();
System.out.println(assistant.chat(1, "Hello my name is Michael"));
System.out.println(assistant.chat(2, "Hello my name is Karl"));
// System.out.println(assistant.chat(1, "What is my name?"));
// System.out.println(assistant.chat(2, "What is my name?"));
}
}
class FileStore implements ChatMemoryStore {
public static final String PATH = "src/main/resources/messages_%s.txt";
@Override
public List<ChatMessage> getMessages(Object memoryId) {
List<ChatMessage> chatMessages = new ArrayList<>();
String file = PATH.formatted(memoryId);
try {
if (!Files.exists(Paths.get(file))) {
Files.createFile(Paths.get(file));
}
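            // note: message roles are not persisted, so every stored line is re-read as a UserMessage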
for (String s : Files.readAllLines(Paths.get(file))) {
chatMessages.add(UserMessage.from(s));
}
} catch (IOException e) {
throw new RuntimeException(e);
}
return chatMessages;
}
@Override
public void updateMessages(Object memoryId, List<ChatMessage> messages) {
String file = PATH.formatted(memoryId);
for (ChatMessage message : messages) {
try {
Files.writeString(Paths.get(file), message.text() + "\n", StandardOpenOption.APPEND);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
@Override
public void deleteMessages(Object memoryId) {
System.out.println("Not implemented");
}
} | [
"dev.langchain4j.service.AiServices.builder",
"dev.langchain4j.memory.chat.MessageWindowChatMemory.builder"
] | [((843, 1004), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((843, 979), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((843, 939), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((843, 906), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((1041, 1193), 'dev.langchain4j.service.AiServices.builder'), ((1041, 1168), 'dev.langchain4j.service.AiServices.builder'), ((1041, 1122), 'dev.langchain4j.service.AiServices.builder')] |
package org.agoncal.fascicle.langchain4j.vectordb.pgvector;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.store.embedding.EmbeddingMatch;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore;
import java.util.List;
// tag::adocSkip[]
/**
* @author Antonio Goncalves
* http://www.antoniogoncalves.org
* --
*/
// end::adocSkip[]
public class MusicianService {
public static void main(String[] args) {
MusicianService musicianService = new MusicianService();
musicianService.usePGVectorToStoreEmbeddings();
}
public void usePGVectorToStoreEmbeddings() {
System.out.println("### usePGVectorToStoreEmbeddings");
// tag::adocSnippet[]
EmbeddingStore<TextSegment> embeddingStore =
PgVectorEmbeddingStore.builder()
.host("localhost")
.port(5432)
.createTable(true)
.dropTableFirst(true)
.dimension(384)
.table("langchain4j_collection")
.user("agoncal")
.password("agoncal")
.database("agoncal")
.build();
// end::adocSnippet[]
EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel();
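    // AllMiniLmL6V2 produces 384-dimensional vectors, matching the dimension(384) configured on the store above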
TextSegment segment1 = TextSegment.from("I've been to France twice.");
Embedding embedding1 = embeddingModel.embed(segment1).content();
embeddingStore.add(embedding1, segment1);
TextSegment segment2 = TextSegment.from("New Delhi is the capital of India.");
Embedding embedding2 = embeddingModel.embed(segment2).content();
embeddingStore.add(embedding2, segment2);
Embedding queryEmbedding = embeddingModel.embed("Did you ever travel abroad?").content();
List<EmbeddingMatch<TextSegment>> relevant = embeddingStore.findRelevant(queryEmbedding, 1);
EmbeddingMatch<TextSegment> embeddingMatch = relevant.get(0);
System.out.println(embeddingMatch.score());
System.out.println(embeddingMatch.embedded().text());
}
}
| [
"dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder"
] | [((989, 1290), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((989, 1273), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((989, 1244), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((989, 1215), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((989, 1190), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((989, 1149), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((989, 1125), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((989, 1095), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((989, 1068), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((989, 1048), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder')] |
package com.ramesh.langchain;
import static dev.langchain4j.data.document.FileSystemDocumentLoader.loadDocument;
import static java.time.Duration.ofSeconds;
import dev.langchain4j.chain.ConversationalRetrievalChain;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.embedding.AllMiniLmL6V2QuantizedEmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.retriever.EmbeddingStoreRetriever;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
/***
 * This project demonstrates how to use LangChain4j to ingest a document and
 * answer prompts against its content by building a retrieval chain
*/
public class ChainWithDocumentLive {
// Open AI Key and Chat GPT Model to use
public static String OPENAI_API_KEY = "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";
public static String OPENAI_MODEL = "gpt-3.5-turbo";
public static void main(String[] args) {
// embedding model to yse
EmbeddingModel embeddingModel = new AllMiniLmL6V2QuantizedEmbeddingModel();
// embeddings will be stored in memory
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
//Creating instance of EmbeddingStoreIngestor
System.out.println("Creating instance of EmbeddingStoreIngestor...");
EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
.documentSplitter(DocumentSplitters.recursive(500, 0))
.embeddingModel(embeddingModel)
.embeddingStore(embeddingStore)
.build();
// ingesting input data
System.out.println("Loading content from simpsons_adventures.txt and ingesting...");
Document document = loadDocument(".\\simpsons_adventures.txt");
ingestor.ingest(document);
// building the chat model
ChatLanguageModel chatModel = OpenAiChatModel.builder()
.apiKey(OPENAI_API_KEY)
.timeout(ofSeconds(60))
.build();
// Building LangChain with Embeddings Retriever
System.out.println("Building LangChain with Embeddings Retriever...");
ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder()
.chatLanguageModel(chatModel)
.retriever(EmbeddingStoreRetriever.from(embeddingStore, embeddingModel))
.chatMemory(MessageWindowChatMemory.withMaxMessages(10))
.promptTemplate(PromptTemplate.from("Answer the following question to the best of your ability: {{question}}\n\nBase your answer on the following information:\n{{information}}"))
.build();
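        // the chain resolves {{question}} with the user query and {{information}} with the retrieved segments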
// prompting ChatGPT
System.out.println("Prompting ChatGPT \"Who is Simpson?\"...");
System.out.println("\nFetching response from ChatGPT via the created LangChain...\n");
// executing the LangChain chain
String answer = chain.execute("Who is Simpson?");
System.out.println(answer);
}
}
| [
"dev.langchain4j.chain.ConversationalRetrievalChain.builder",
"dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder",
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((1849, 2057), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1849, 2036), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1849, 1992), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1849, 1948), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2366, 2484), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2366, 2463), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2366, 2427), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2667, 3113), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2667, 3092), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2667, 2901), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2667, 2832), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2667, 2747), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder')] |
package io.quarkiverse.langchain4j.samples;
import static dev.langchain4j.data.document.splitter.DocumentSplitters.recursive;
import java.util.List;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import io.quarkiverse.langchain4j.pinecone.PineconeEmbeddingStore;
@ApplicationScoped
public class IngestorExampleWithPinecone {
/**
* The embedding store (the database).
* The bean is provided by the quarkus-langchain4j-pinecone extension.
*/
@Inject
PineconeEmbeddingStore store;
/**
     * The embedding model (how the vector of a document is computed).
* The bean is provided by the LLM (like openai) extension.
*/
@Inject
EmbeddingModel embeddingModel;
public void ingest(List<Document> documents) {
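        // split each document into 500-character segments (no overlap), embed them, and store the vectors in Pinecone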
EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
.embeddingStore(store)
.embeddingModel(embeddingModel)
.documentSplitter(recursive(500, 0))
.build();
// Warning - this can take a long time...
ingestor.ingest(documents);
}
}
| [
"dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder"
] | [((1005, 1202), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1005, 1177), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1005, 1124), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1005, 1076), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder')] |
import dev.langchain4j.data.document.FileSystemDocumentLoader;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiEmbeddingModel;
import dev.langchain4j.retriever.EmbeddingStoreRetriever;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.Map;
import java.util.Scanner;
import static java.util.stream.Collectors.joining;
public class _03_Retrieval {
private static final String RETRIEVER_DOCUMENT_NAME = "";
public static void main(String[] args) {
var openAiKey = System.getenv("OPENAI_API_KEY");
var embeddingModel = OpenAiEmbeddingModel.withApiKey(openAiKey);
var embeddingStore = new InMemoryEmbeddingStore<TextSegment>();
// 0 - Ingesting the document and store in vectorized form
var ingestor = EmbeddingStoreIngestor.builder()
.documentSplitter(DocumentSplitters.recursive(500, 0))
.embeddingModel(embeddingModel)
.embeddingStore(embeddingStore)
.build();
var filePath = toPath(RETRIEVER_DOCUMENT_NAME);
var document = FileSystemDocumentLoader.loadDocument(filePath);
ingestor.ingest(document);
var chatModel = OpenAiChatModel.withApiKey(openAiKey);
var chatMemory = MessageWindowChatMemory.withMaxMessages(10);
var retriever = EmbeddingStoreRetriever.from(embeddingStore, embeddingModel);
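        // the retriever embeds each question with the same embedding model and returns the most similar stored segments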
var promptTemplate = PromptTemplate.from("""
Answer the following question to the best of your ability: {{question}}
Base your answer on the following information:
{{information}}""");
try (Scanner scanner = new Scanner(System.in)) {
while (true) {
System.out.println("\nEnter your question: ");
// 1 - Retrieving the question from the user
String question = scanner.nextLine();
if (question.equals("exit")) {
break;
}
// 2, 3 - Retrieving the most relevant segments according to the question
var relevantSegments = retriever.findRelevant(question);
var prompt = promptTemplate.apply(
Map.of(
"question", question,
"information", format(relevantSegments)));
chatMemory.add(prompt.toUserMessage());
// 4 - Send the prompt to the model
var response = chatModel.generate(chatMemory.messages());
chatMemory.add(response.content());
// 5 - Printing answer to the user
System.out.println(response.content().text());
System.out.println("\n\n########### TOKEN USAGE ############\n");
System.out.println(response.tokenUsage());
}
}
}
private static String format(List<TextSegment> relevantSegments) {
return relevantSegments.stream()
.map(TextSegment::text)
.map(segment -> "..." + segment + "...")
.collect(joining("\n\n"));
}
private static Path toPath(String fileName) {
try {
URL fileUrl = _03_Retrieval.class.getResource(fileName);
return Paths.get(fileUrl.toURI());
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
}
}
| [
"dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder"
] | [((1262, 1486), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1262, 1461), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1262, 1413), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1262, 1365), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder')] |
package org.example;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.output.structured.Description;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.SystemMessage;
import dev.langchain4j.service.UserMessage;
import java.util.List;
public class _09_AIServices_04_PokemonTrainer {
public static void main(String[] args) {
        // Set the logger to debug level
OpenAiChatModel model = OpenAiChatModel.builder()
.apiKey(ApiKeys.OPENAI_DEMO)
.logRequests(true)
.build();
PokemonTrainerGeneratorService trainerGenerator = AiServices.create(PokemonTrainerGeneratorService.class, model);
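        // AiServices parses the model's reply into the Trainer record declared below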
Trainer trainer = trainerGenerator.generate("Generate a low level trainer named 'Kelvin' with 2 bug and 2 fire pokemon");
System.out.println(trainer);
}
}
interface PokemonTrainerGeneratorService {
@SystemMessage("You generate random pokemon trainers with random pokemon, in accordance to the user message")
Trainer generate(@UserMessage String text);
}
record Trainer(String name, List<Pokemon> team) {
}
record Pokemon(String name
// , @Description("All uppercase") String type
, String type
, int level
, int hp
, @Description("Random number of moves between 1 and 4") List<String> moves)
{} | [
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((450, 580), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((450, 555), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((450, 520), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')] |
import dev.ai4j.openai4j.Model;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
public class _00_Model {
public static void main(String[] args) {
String openAiKey = System.getenv("OPENAI_API_KEY");
ChatLanguageModel chatModel = OpenAiChatModel.builder()
.modelName(Model.GPT_3_5_TURBO.stringValue())
.apiKey(openAiKey)
.build();
var prompt = "Write hello world example in Java printing 'Hello TDC Future 2023'";
var response = chatModel.generate(UserMessage.from(prompt));
System.out.println(response.content().text());
System.out.println("\n\n########### TOKEN USAGE ############\n");
System.out.println(response.tokenUsage());
}
} | [
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((359, 506), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((359, 481), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((359, 446), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((412, 445), 'dev.ai4j.openai4j.Model.GPT_3_5_TURBO.stringValue')] |
package com.example.application.services;
import com.vaadin.flow.server.auth.AnonymousAllowed;
import dev.hilla.BrowserCallable;
import dev.langchain4j.memory.chat.TokenWindowChatMemory;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import dev.langchain4j.model.openai.OpenAiTokenizer;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.TokenStream;
import jakarta.annotation.PostConstruct;
import org.springframework.beans.factory.annotation.Value;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Sinks;
@BrowserCallable
@AnonymousAllowed
public class ChatService {
@Value("${openai.api.key}")
private String OPENAI_API_KEY;
private Assistant assistant;
private StreamingAssistant streamingAssistant;
interface Assistant {
String chat(String message);
}
interface StreamingAssistant {
TokenStream chat(String message);
}
@PostConstruct
public void init() {
var memory = TokenWindowChatMemory.withMaxTokens(2000, new OpenAiTokenizer("gpt-3.5-turbo"));
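        // one token-window memory (max 2000 tokens, counted with the gpt-3.5-turbo tokenizer) shared by both assistants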
assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(OpenAiChatModel.withApiKey(OPENAI_API_KEY))
.chatMemory(memory)
.build();
streamingAssistant = AiServices.builder(StreamingAssistant.class)
.streamingChatLanguageModel(OpenAiStreamingChatModel.withApiKey(OPENAI_API_KEY))
.chatMemory(memory)
.build();
}
public String chat(String message) {
return assistant.chat(message);
}
public Flux<String> chatStream(String message) {
Sinks.Many<String> sink = Sinks.many().unicast().onBackpressureBuffer();
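        // bridge the push-based TokenStream callbacks onto a reactive Flux through a unicast sink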
streamingAssistant.chat(message)
.onNext(sink::tryEmitNext)
.onComplete(sink::tryEmitComplete)
.onError(sink::tryEmitError)
.start();
return sink.asFlux();
}
}
| [
"dev.langchain4j.service.AiServices.builder"
] | [((1152, 1327), 'dev.langchain4j.service.AiServices.builder'), ((1152, 1302), 'dev.langchain4j.service.AiServices.builder'), ((1152, 1266), 'dev.langchain4j.service.AiServices.builder'), ((1359, 1561), 'dev.langchain4j.service.AiServices.builder'), ((1359, 1536), 'dev.langchain4j.service.AiServices.builder'), ((1359, 1500), 'dev.langchain4j.service.AiServices.builder'), ((1745, 1790), 'reactor.core.publisher.Sinks.many'), ((1745, 1767), 'reactor.core.publisher.Sinks.many')] |
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.input.Prompt;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.model.input.structured.StructuredPrompt;
import dev.langchain4j.model.input.structured.StructuredPromptProcessor;
import dev.langchain4j.model.openai.OpenAiChatModel;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static java.time.Duration.ofSeconds;
import static java.util.Arrays.asList;
public class _03_PromptTemplate {
static class Simple_Prompt_Template_Example {
public static void main(String[] args) {
ChatLanguageModel model = OpenAiChatModel.builder()
.apiKey(ApiKeys.OPENAI_API_KEY)
.timeout(ofSeconds(60))
.build();
String template = "Create a recipe for a {{dishType}} with the following ingredients: {{ingredients}}";
PromptTemplate promptTemplate = PromptTemplate.from(template);
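            // {{dishType}} and {{ingredients}} are resolved from the variables map below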
Map<String, Object> variables = new HashMap<>();
variables.put("dishType", "oven dish");
variables.put("ingredients", "potato, tomato, feta, olive oil");
Prompt prompt = promptTemplate.apply(variables);
String response = model.generate(prompt.text());
System.out.println(response);
}
}
static class Structured_Prompt_Template_Example {
@StructuredPrompt({
"Create a recipe of a {{dish}} that can be prepared using only {{ingredients}}.",
"Structure your answer in the following way:",
"Recipe name: ...",
"Description: ...",
"Preparation time: ...",
"Required ingredients:",
"- ...",
"- ...",
"Instructions:",
"- ...",
"- ..."
})
static class CreateRecipePrompt {
String dish;
List<String> ingredients;
CreateRecipePrompt(String dish, List<String> ingredients) {
this.dish = dish;
this.ingredients = ingredients;
}
}
public static void main(String[] args) {
ChatLanguageModel model = OpenAiChatModel.builder()
.apiKey(ApiKeys.OPENAI_API_KEY)
.timeout(ofSeconds(60))
.build();
Structured_Prompt_Template_Example.CreateRecipePrompt createRecipePrompt = new Structured_Prompt_Template_Example.CreateRecipePrompt(
"salad",
asList("cucumber", "tomato", "feta", "onion", "olives")
);
Prompt prompt = StructuredPromptProcessor.toPrompt(createRecipePrompt);
String recipe = model.generate(prompt.text());
System.out.println(recipe);
}
}
}
| [
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((668, 818), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((668, 789), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((668, 745), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2305, 2455), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2305, 2426), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2305, 2382), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')] |
/*
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gemini.workshop;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ImageContent;
import dev.langchain4j.data.message.TextContent;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.model.output.Response;
public class Step3_Multimodal {
static final String CAT_IMAGE_URL =
"https://upload.wikimedia.org/wikipedia/commons/e/e9/" +
"Felis_silvestris_silvestris_small_gradual_decrease_of_quality.png";
public static void main(String[] args) {
ChatLanguageModel model = VertexAiGeminiChatModel.builder()
.project(System.getenv("PROJECT_ID"))
.location(System.getenv("LOCATION"))
.modelName("gemini-1.0-pro-vision")
.build();
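        // a single user message can combine image and text content for the multimodal model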
UserMessage userMessage = UserMessage.from(
ImageContent.from(CAT_IMAGE_URL),
TextContent.from("Describe the picture")
);
Response<AiMessage> response = model.generate(userMessage);
System.out.println(response.content().text());
}
}
| [
"dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder"
] | [((1277, 1478), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((1277, 1457), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((1277, 1409), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((1277, 1360), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder')] |
package dev.langchain4j.model.openai;
import dev.ai4j.openai4j.chat.*;
import dev.ai4j.openai4j.completion.CompletionChoice;
import dev.ai4j.openai4j.completion.CompletionResponse;
import dev.langchain4j.agent.tool.ToolExecutionRequest;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.model.Tokenizer;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.model.output.TokenUsage;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import static dev.langchain4j.model.openai.InternalOpenAiHelper.finishReasonFrom;
import static java.util.Collections.singletonList;
import static java.util.stream.Collectors.toList;
/**
* This class needs to be thread safe because it is called when a streaming result comes back
* and there is no guarantee that this thread will be the same as the one that initiated the request,
* in fact it almost certainly won't be.
*/
public class OpenAiStreamingResponseBuilder {
private final StringBuffer contentBuilder = new StringBuffer();
private final StringBuffer toolNameBuilder = new StringBuffer();
private final StringBuffer toolArgumentsBuilder = new StringBuffer();
private final Map<Integer, ToolExecutionRequestBuilder> indexToToolExecutionRequestBuilder = new ConcurrentHashMap<>();
private volatile String finishReason;
private final Integer inputTokenCount;
public OpenAiStreamingResponseBuilder(Integer inputTokenCount) {
this.inputTokenCount = inputTokenCount;
}
public void append(ChatCompletionResponse partialResponse) {
if (partialResponse == null) {
return;
}
List<ChatCompletionChoice> choices = partialResponse.choices();
if (choices == null || choices.isEmpty()) {
return;
}
ChatCompletionChoice chatCompletionChoice = choices.get(0);
if (chatCompletionChoice == null) {
return;
}
String finishReason = chatCompletionChoice.finishReason();
if (finishReason != null) {
this.finishReason = finishReason;
}
Delta delta = chatCompletionChoice.delta();
if (delta == null) {
return;
}
String content = delta.content();
if (content != null) {
contentBuilder.append(content);
return;
}
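        // legacy function-call deltas stream the name and arguments in fragments, accumulated by the builders above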
if (delta.functionCall() != null) {
FunctionCall functionCall = delta.functionCall();
if (functionCall.name() != null) {
toolNameBuilder.append(functionCall.name());
}
if (functionCall.arguments() != null) {
toolArgumentsBuilder.append(functionCall.arguments());
}
}
if (delta.toolCalls() != null && !delta.toolCalls().isEmpty()) {
ToolCall toolCall = delta.toolCalls().get(0);
ToolExecutionRequestBuilder toolExecutionRequestBuilder
= indexToToolExecutionRequestBuilder.computeIfAbsent(toolCall.index(), idx -> new ToolExecutionRequestBuilder());
if (toolCall.id() != null) {
toolExecutionRequestBuilder.idBuilder.append(toolCall.id());
}
FunctionCall functionCall = toolCall.function();
if (functionCall.name() != null) {
toolExecutionRequestBuilder.nameBuilder.append(functionCall.name());
}
if (functionCall.arguments() != null) {
toolExecutionRequestBuilder.argumentsBuilder.append(functionCall.arguments());
}
}
}
public void append(CompletionResponse partialResponse) {
if (partialResponse == null) {
return;
}
List<CompletionChoice> choices = partialResponse.choices();
if (choices == null || choices.isEmpty()) {
return;
}
CompletionChoice completionChoice = choices.get(0);
if (completionChoice == null) {
return;
}
String finishReason = completionChoice.finishReason();
if (finishReason != null) {
this.finishReason = finishReason;
}
String token = completionChoice.text();
if (token != null) {
contentBuilder.append(token);
}
}
public Response<AiMessage> build(Tokenizer tokenizer, boolean forcefulToolExecution) {
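        // a completed stream resolves to exactly one of: plain text content, a legacy function call, or a list of tool calls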
String content = contentBuilder.toString();
if (!content.isEmpty()) {
return Response.from(
AiMessage.from(content),
tokenUsage(content, tokenizer),
finishReasonFrom(finishReason)
);
}
String toolName = toolNameBuilder.toString();
if (!toolName.isEmpty()) {
ToolExecutionRequest toolExecutionRequest = ToolExecutionRequest.builder()
.name(toolName)
.arguments(toolArgumentsBuilder.toString())
.build();
return Response.from(
AiMessage.from(toolExecutionRequest),
tokenUsage(singletonList(toolExecutionRequest), tokenizer, forcefulToolExecution),
finishReasonFrom(finishReason)
);
}
if (!indexToToolExecutionRequestBuilder.isEmpty()) {
List<ToolExecutionRequest> toolExecutionRequests = indexToToolExecutionRequestBuilder.values().stream()
.map(it -> ToolExecutionRequest.builder()
.id(it.idBuilder.toString())
.name(it.nameBuilder.toString())
.arguments(it.argumentsBuilder.toString())
.build())
.collect(toList());
return Response.from(
AiMessage.from(toolExecutionRequests),
tokenUsage(toolExecutionRequests, tokenizer, forcefulToolExecution),
finishReasonFrom(finishReason)
);
}
return null;
}
private TokenUsage tokenUsage(String content, Tokenizer tokenizer) {
if (tokenizer == null) {
return null;
}
int outputTokenCount = tokenizer.estimateTokenCountInText(content);
return new TokenUsage(inputTokenCount, outputTokenCount);
}
private TokenUsage tokenUsage(List<ToolExecutionRequest> toolExecutionRequests, Tokenizer tokenizer, boolean forcefulToolExecution) {
if (tokenizer == null) {
return null;
}
int outputTokenCount = 0;
if (forcefulToolExecution) {
// OpenAI calculates output tokens differently when tool is executed forcefully
for (ToolExecutionRequest toolExecutionRequest : toolExecutionRequests) {
outputTokenCount += tokenizer.estimateTokenCountInForcefulToolExecutionRequest(toolExecutionRequest);
}
} else {
outputTokenCount = tokenizer.estimateTokenCountInToolExecutionRequests(toolExecutionRequests);
}
return new TokenUsage(inputTokenCount, outputTokenCount);
}
private static class ToolExecutionRequestBuilder {
private final StringBuffer idBuilder = new StringBuffer();
private final StringBuffer nameBuilder = new StringBuffer();
private final StringBuffer argumentsBuilder = new StringBuffer();
}
}
| [
"dev.langchain4j.agent.tool.ToolExecutionRequest.builder"
] | [((4860, 5019), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((4860, 4990), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((4860, 4926), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((5501, 5757), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((5501, 5720), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((5501, 5649), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((5501, 5588), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder')] |
package io.quarkiverse.langchain4j.huggingface;
import static java.util.stream.Collectors.joining;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.time.Duration;
import java.util.List;
import java.util.Optional;
import java.util.OptionalDouble;
import java.util.OptionalInt;
import dev.langchain4j.agent.tool.ToolSpecification;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.huggingface.client.HuggingFaceClient;
import dev.langchain4j.model.huggingface.client.Options;
import dev.langchain4j.model.huggingface.client.Parameters;
import dev.langchain4j.model.huggingface.client.TextGenerationRequest;
import dev.langchain4j.model.huggingface.client.TextGenerationResponse;
import dev.langchain4j.model.huggingface.spi.HuggingFaceClientFactory;
import dev.langchain4j.model.output.Response;
/**
* This is a Quarkus specific version of the HuggingFace model.
* <p>
* TODO: remove this in the future when the stock {@link dev.langchain4j.model.huggingface.HuggingFaceChatModel}
 * has been updated to fit our needs (i.e. allowing {@code returnFullText} to be null and making {@code accessToken} optional)
*/
public class QuarkusHuggingFaceChatModel implements ChatLanguageModel {
public static final QuarkusHuggingFaceClientFactory CLIENT_FACTORY = new QuarkusHuggingFaceClientFactory();
private final HuggingFaceClient client;
private final Double temperature;
private final Integer maxNewTokens;
private final Boolean returnFullText;
private final Boolean waitForModel;
private final Optional<Boolean> doSample;
private final OptionalDouble topP;
private final OptionalInt topK;
private final OptionalDouble repetitionPenalty;
private QuarkusHuggingFaceChatModel(Builder builder) {
this.client = CLIENT_FACTORY.create(builder, new HuggingFaceClientFactory.Input() {
@Override
public String apiKey() {
return builder.accessToken;
}
@Override
public String modelId() {
throw new UnsupportedOperationException("Should not be called");
}
@Override
public Duration timeout() {
return builder.timeout;
}
}, builder.url);
this.temperature = builder.temperature;
this.maxNewTokens = builder.maxNewTokens;
this.returnFullText = builder.returnFullText;
this.waitForModel = builder.waitForModel;
this.doSample = builder.doSample;
this.topP = builder.topP;
this.topK = builder.topK;
this.repetitionPenalty = builder.repetitionPenalty;
}
public static Builder builder() {
return new Builder();
}
@Override
public Response<AiMessage> generate(List<ChatMessage> messages) {
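        // the HuggingFace text-generation endpoint takes a single input string, so the chat messages are flattened and joined with newlines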
Parameters.Builder builder = Parameters.builder()
.temperature(temperature)
.maxNewTokens(maxNewTokens)
.returnFullText(returnFullText);
doSample.ifPresent(builder::doSample);
topK.ifPresent(builder::topK);
topP.ifPresent(builder::topP);
repetitionPenalty.ifPresent(builder::repetitionPenalty);
Parameters parameters = builder
.build();
TextGenerationRequest request = TextGenerationRequest.builder()
.inputs(messages.stream()
.map(ChatMessage::text)
.collect(joining("\n")))
.parameters(parameters)
.options(Options.builder()
.waitForModel(waitForModel)
.build())
.build();
TextGenerationResponse textGenerationResponse = client.chat(request);
return Response.from(AiMessage.from(textGenerationResponse.generatedText()));
}
@Override
public Response<AiMessage> generate(List<ChatMessage> messages, List<ToolSpecification> toolSpecifications) {
throw new IllegalArgumentException("Tools are currently not supported for HuggingFace models");
}
@Override
public Response<AiMessage> generate(List<ChatMessage> messages, ToolSpecification toolSpecification) {
throw new IllegalArgumentException("Tools are currently not supported for HuggingFace models");
}
public static final class Builder {
private String accessToken;
private Duration timeout = Duration.ofSeconds(15);
private Double temperature;
private Integer maxNewTokens;
private Boolean returnFullText;
private Boolean waitForModel = true;
private URI url;
private Optional<Boolean> doSample;
private OptionalInt topK;
private OptionalDouble topP;
private OptionalDouble repetitionPenalty;
public boolean logResponses;
public boolean logRequests;
public Builder accessToken(String accessToken) {
this.accessToken = accessToken;
return this;
}
public Builder url(URL url) {
try {
this.url = url.toURI();
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
return this;
}
public Builder timeout(Duration timeout) {
this.timeout = timeout;
return this;
}
public Builder temperature(Double temperature) {
this.temperature = temperature;
return this;
}
public Builder maxNewTokens(Integer maxNewTokens) {
this.maxNewTokens = maxNewTokens;
return this;
}
public Builder returnFullText(Boolean returnFullText) {
this.returnFullText = returnFullText;
return this;
}
public Builder waitForModel(Boolean waitForModel) {
this.waitForModel = waitForModel;
return this;
}
public Builder doSample(Optional<Boolean> doSample) {
this.doSample = doSample;
return this;
}
public Builder topK(OptionalInt topK) {
this.topK = topK;
return this;
}
public Builder topP(OptionalDouble topP) {
this.topP = topP;
return this;
}
public Builder repetitionPenalty(OptionalDouble repetitionPenalty) {
this.repetitionPenalty = repetitionPenalty;
return this;
}
public QuarkusHuggingFaceChatModel build() {
return new QuarkusHuggingFaceChatModel(this);
}
public Builder logRequests(boolean logRequests) {
this.logRequests = logRequests;
return this;
}
public Builder logResponses(boolean logResponses) {
this.logResponses = logResponses;
return this;
}
}
}
| [
"dev.langchain4j.model.huggingface.client.Parameters.builder",
"dev.langchain4j.model.huggingface.client.TextGenerationRequest.builder",
"dev.langchain4j.model.huggingface.client.Options.builder"
] | [((2990, 3144), 'dev.langchain4j.model.huggingface.client.Parameters.builder'), ((2990, 3096), 'dev.langchain4j.model.huggingface.client.Parameters.builder'), ((2990, 3052), 'dev.langchain4j.model.huggingface.client.Parameters.builder'), ((3444, 3808), 'dev.langchain4j.model.huggingface.client.TextGenerationRequest.builder'), ((3444, 3783), 'dev.langchain4j.model.huggingface.client.TextGenerationRequest.builder'), ((3444, 3654), 'dev.langchain4j.model.huggingface.client.TextGenerationRequest.builder'), ((3444, 3614), 'dev.langchain4j.model.huggingface.client.TextGenerationRequest.builder'), ((3680, 3782), 'dev.langchain4j.model.huggingface.client.Options.builder'), ((3680, 3749), 'dev.langchain4j.model.huggingface.client.Options.builder')] |
package io.quarkiverse.langchain4j.runtime.aiservice;
import static dev.langchain4j.data.message.UserMessage.userMessage;
import static dev.langchain4j.internal.Exceptions.runtime;
import static dev.langchain4j.service.AiServices.removeToolMessages;
import static dev.langchain4j.service.AiServices.verifyModerationIfNeeded;
import static dev.langchain4j.service.ServiceOutputParser.parse;
import java.lang.reflect.Array;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.function.Consumer;
import java.util.function.Function;
import org.jboss.logging.Logger;
import dev.langchain4j.agent.tool.ToolExecutionRequest;
import dev.langchain4j.agent.tool.ToolExecutor;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.SystemMessage;
import dev.langchain4j.data.message.ToolExecutionResultMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.model.input.Prompt;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.model.input.structured.StructuredPrompt;
import dev.langchain4j.model.input.structured.StructuredPromptProcessor;
import dev.langchain4j.model.moderation.Moderation;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.model.output.TokenUsage;
import dev.langchain4j.rag.query.Metadata;
import dev.langchain4j.service.AiServiceContext;
import dev.langchain4j.service.AiServiceTokenStream;
import dev.langchain4j.service.TokenStream;
import io.quarkiverse.langchain4j.audit.Audit;
import io.quarkiverse.langchain4j.audit.AuditService;
import io.quarkus.arc.Arc;
import io.quarkus.arc.ArcContainer;
import io.quarkus.arc.ManagedContext;
import io.smallrye.mutiny.Multi;
import io.smallrye.mutiny.infrastructure.Infrastructure;
import io.smallrye.mutiny.subscription.MultiEmitter;
/**
 * Provides the basic building blocks that the generated interface method implementations call into.
*/
public class AiServiceMethodImplementationSupport {
private static final Logger log = Logger.getLogger(AiServiceMethodImplementationSupport.class);
private static final int MAX_SEQUENTIAL_TOOL_EXECUTIONS = 10;
/**
* This method is called by the implementations of each ai service method.
*/
public Object implement(Input input) {
QuarkusAiServiceContext context = input.context;
AiServiceMethodCreateInfo createInfo = input.createInfo;
Object[] methodArgs = input.methodArgs;
AuditService auditService = context.auditService;
Audit audit = null;
if (auditService != null) {
audit = auditService.create(new Audit.CreateInfo(createInfo.getInterfaceName(), createInfo.getMethodName(),
methodArgs, createInfo.getMemoryIdParamPosition()));
}
// TODO: add validation
try {
var result = doImplement(createInfo, methodArgs, context, audit);
if (audit != null) {
audit.onCompletion(result);
auditService.complete(audit);
}
return result;
} catch (Exception e) {
log.errorv(e, "Execution of {0}#{1} failed", createInfo.getInterfaceName(), createInfo.getMethodName());
if (audit != null) {
audit.onFailure(e);
auditService.complete(audit);
}
throw e;
}
}
private static Object doImplement(AiServiceMethodCreateInfo createInfo, Object[] methodArgs,
QuarkusAiServiceContext context, Audit audit) {
Optional<SystemMessage> systemMessage = prepareSystemMessage(createInfo, methodArgs);
UserMessage userMessage = prepareUserMessage(context, createInfo, methodArgs);
if (audit != null) {
audit.initialMessages(systemMessage, userMessage);
}
Object memoryId = memoryId(createInfo, methodArgs, context.chatMemoryProvider != null);
if (context.retrievalAugmentor != null) { // TODO extract method/class
List<ChatMessage> chatMemory = context.hasChatMemory()
? context.chatMemory(memoryId).messages()
: null;
Metadata metadata = Metadata.from(userMessage, memoryId, chatMemory);
userMessage = context.retrievalAugmentor.augment(userMessage, metadata);
}
// TODO give user ability to provide custom OutputParser
String outputFormatInstructions = createInfo.getUserMessageInfo().getOutputFormatInstructions();
userMessage = UserMessage.from(userMessage.text() + outputFormatInstructions);
if (context.hasChatMemory()) {
ChatMemory chatMemory = context.chatMemory(memoryId);
if (systemMessage.isPresent()) {
chatMemory.add(systemMessage.get());
}
chatMemory.add(userMessage);
}
List<ChatMessage> messages;
if (context.hasChatMemory()) {
messages = context.chatMemory(memoryId).messages();
} else {
messages = new ArrayList<>();
systemMessage.ifPresent(messages::add);
messages.add(userMessage);
}
Class<?> returnType = createInfo.getReturnType();
if (returnType.equals(TokenStream.class)) {
return new AiServiceTokenStream(messages, context, memoryId);
}
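        // Reactive streaming: adapt the callback-based AiServiceTokenStream to a
        // Mutiny Multi, emitting each partial token and completing (or failing)
        // the stream when the underlying response finishes.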
if (returnType.equals(Multi.class)) {
return Multi.createFrom().emitter(new Consumer<MultiEmitter<? super String>>() {
@Override
public void accept(MultiEmitter<? super String> em) {
new AiServiceTokenStream(messages, context, memoryId)
.onNext(em::emit)
.onComplete(new Consumer<Response<AiMessage>>() {
@Override
public void accept(Response<AiMessage> message) {
em.complete();
}
})
.onError(em::fail)
.start();
}
});
}
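        // Moderation (if configured) is started in the background so it can run
        // concurrently with the chat request; verifyModerationIfNeeded() further
        // down blocks on the future and throws if the content was flagged.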
Future<Moderation> moderationFuture = triggerModerationIfNeeded(context, createInfo, messages);
log.debug("Attempting to obtain AI response");
Response<AiMessage> response = context.toolSpecifications == null
? context.chatModel.generate(messages)
: context.chatModel.generate(messages, context.toolSpecifications);
log.debug("AI response obtained");
if (audit != null) {
audit.addLLMToApplicationMessage(response);
}
TokenUsage tokenUsageAccumulator = response.tokenUsage();
verifyModerationIfNeeded(moderationFuture);
int executionsLeft = MAX_SEQUENTIAL_TOOL_EXECUTIONS;
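        // Tool-execution loop: while the model keeps responding with tool execution
        // requests, run each requested tool, feed the results back via the chat
        // memory, and ask the model again; the counter caps runaway tool chains.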
while (true) {
if (executionsLeft-- == 0) {
throw runtime("Something is wrong, exceeded %s sequential tool executions",
MAX_SEQUENTIAL_TOOL_EXECUTIONS);
}
AiMessage aiMessage = response.content();
if (context.hasChatMemory()) {
context.chatMemory(memoryId).add(response.content());
}
if (!aiMessage.hasToolExecutionRequests()) {
break;
}
ChatMemory chatMemory = context.chatMemory(memoryId);
for (ToolExecutionRequest toolExecutionRequest : aiMessage.toolExecutionRequests()) {
log.debugv("Attempting to execute tool {0}", toolExecutionRequest);
ToolExecutor toolExecutor = context.toolExecutors.get(toolExecutionRequest.name());
if (toolExecutor == null) {
throw runtime("Tool executor %s not found", toolExecutionRequest.name());
}
String toolExecutionResult = toolExecutor.execute(toolExecutionRequest, memoryId);
log.debugv("Result of {0} is '{1}'", toolExecutionRequest, toolExecutionResult);
ToolExecutionResultMessage toolExecutionResultMessage = ToolExecutionResultMessage.from(
toolExecutionRequest,
toolExecutionResult);
if (audit != null) {
audit.addApplicationToLLMMessage(toolExecutionResultMessage);
}
chatMemory.add(toolExecutionResultMessage);
}
log.debug("Attempting to obtain AI response");
response = context.chatModel.generate(chatMemory.messages(), context.toolSpecifications);
log.debug("AI response obtained");
if (audit != null) {
audit.addLLMToApplicationMessage(response);
}
tokenUsageAccumulator = tokenUsageAccumulator.add(response.tokenUsage());
}
response = Response.from(response.content(), tokenUsageAccumulator, response.finishReason());
return parse(response, returnType);
}
private static Future<Moderation> triggerModerationIfNeeded(AiServiceContext context,
AiServiceMethodCreateInfo createInfo,
List<ChatMessage> messages) {
Future<Moderation> moderationFuture = null;
if (createInfo.isRequiresModeration()) {
log.debug("Moderation is required and it will be executed in the background");
// TODO: don't occupy a worker thread for this and instead use the reactive API provided by the client
ExecutorService defaultExecutor = (ExecutorService) Infrastructure.getDefaultExecutor();
moderationFuture = defaultExecutor.submit(new Callable<>() {
@Override
public Moderation call() {
List<ChatMessage> messagesToModerate = removeToolMessages(messages);
log.debug("Attempting to moderate messages");
var result = context.moderationModel.moderate(messagesToModerate).content();
log.debug("Moderation completed");
return result;
}
});
}
return moderationFuture;
}
private static Optional<SystemMessage> prepareSystemMessage(AiServiceMethodCreateInfo createInfo, Object[] methodArgs) {
if (createInfo.getSystemMessageInfo().isEmpty()) {
return Optional.empty();
}
AiServiceMethodCreateInfo.TemplateInfo systemMessageInfo = createInfo.getSystemMessageInfo().get();
Map<String, Object> templateParams = new HashMap<>();
Map<String, Integer> nameToParamPosition = systemMessageInfo.getNameToParamPosition();
for (var entry : nameToParamPosition.entrySet()) {
templateParams.put(entry.getKey(), methodArgs[entry.getValue()]);
}
Prompt prompt = PromptTemplate.from(systemMessageInfo.getText()).apply(templateParams);
return Optional.of(prompt.toSystemMessage());
}
private static UserMessage prepareUserMessage(AiServiceContext context, AiServiceMethodCreateInfo createInfo,
Object[] methodArgs) {
AiServiceMethodCreateInfo.UserMessageInfo userMessageInfo = createInfo.getUserMessageInfo();
String userName = null;
if (userMessageInfo.getUserNameParamPosition().isPresent()) {
userName = methodArgs[userMessageInfo.getUserNameParamPosition().get()]
                    .toString(); // LangChain4j does this, but we might want to make anything other than a String a build-time error
}
if (userMessageInfo.getTemplate().isPresent()) {
AiServiceMethodCreateInfo.TemplateInfo templateInfo = userMessageInfo.getTemplate().get();
Map<String, Object> templateParams = new HashMap<>();
Map<String, Integer> nameToParamPosition = templateInfo.getNameToParamPosition();
for (var entry : nameToParamPosition.entrySet()) {
Object value = transformTemplateParamValue(methodArgs[entry.getValue()]);
templateParams.put(entry.getKey(), value);
}
// we do not need to apply the instructions as they have already been added to the template text at build time
Prompt prompt = PromptTemplate.from(templateInfo.getText()).apply(templateParams);
return createUserMessage(userName, prompt.text());
} else if (userMessageInfo.getParamPosition().isPresent()) {
Integer paramIndex = userMessageInfo.getParamPosition().get();
Object argValue = methodArgs[paramIndex];
if (argValue == null) {
throw new IllegalArgumentException(
"Unable to construct UserMessage for class '" + context.aiServiceClass.getName()
+ "' because parameter with index "
+ paramIndex + " is null");
}
return createUserMessage(userName, toString(argValue));
} else {
throw new IllegalStateException("Unable to construct UserMessage for class '" + context.aiServiceClass.getName()
+ "'. Please contact the maintainers");
}
}
private static UserMessage createUserMessage(String name, String text) {
if (name == null) {
return userMessage(text);
} else {
return userMessage(name, text);
}
}
private static Object transformTemplateParamValue(Object value) {
if (value.getClass().isArray()) {
            // Qute does not transform these values, but LangChain4j expects them to be converted to a [item1, item2, item3]-like syntax
return Arrays.toString((Object[]) value);
}
return value;
}
private static Object memoryId(AiServiceMethodCreateInfo createInfo, Object[] methodArgs, boolean hasChatMemoryProvider) {
if (createInfo.getMemoryIdParamPosition().isPresent()) {
return methodArgs[createInfo.getMemoryIdParamPosition().get()];
}
if (hasChatMemoryProvider) {
// first we try to use the current context in order to make sure that we don't interleave chat messages of concurrent requests
ArcContainer container = Arc.container();
if (container != null) {
ManagedContext requestContext = container.requestContext();
if (requestContext.isActive()) {
return requestContext.getState();
}
}
}
// fallback to the default since there is nothing else we can really use here
return "default";
}
    // TODO: share these methods with LangChain4j
private static String toString(Object arg) {
if (arg.getClass().isArray()) {
return arrayToString(arg);
} else if (arg.getClass().isAnnotationPresent(StructuredPrompt.class)) {
return StructuredPromptProcessor.toPrompt(arg).text();
} else {
return arg.toString();
}
}
private static String arrayToString(Object arg) {
StringBuilder sb = new StringBuilder("[");
int length = Array.getLength(arg);
for (int i = 0; i < length; i++) {
sb.append(toString(Array.get(arg, i)));
if (i < length - 1) {
sb.append(", ");
}
}
sb.append("]");
return sb.toString();
}
public static class Input {
final QuarkusAiServiceContext context;
final AiServiceMethodCreateInfo createInfo;
final Object[] methodArgs;
public Input(QuarkusAiServiceContext context, AiServiceMethodCreateInfo createInfo, Object[] methodArgs) {
this.context = context;
this.createInfo = createInfo;
this.methodArgs = methodArgs;
}
}
public interface Wrapper {
Object wrap(Input input, Function<Input, Object> fun);
}
}
| [
"dev.langchain4j.model.input.PromptTemplate.from",
"dev.langchain4j.model.input.structured.StructuredPromptProcessor.toPrompt"
] | [((5712, 6437), 'io.smallrye.mutiny.Multi.createFrom'), ((11153, 11223), 'dev.langchain4j.model.input.PromptTemplate.from'), ((12561, 12626), 'dev.langchain4j.model.input.PromptTemplate.from'), ((15232, 15278), 'dev.langchain4j.model.input.structured.StructuredPromptProcessor.toPrompt')] |
package io.thomasvitale.langchain4j.spring.core.chat.messages.jackson;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import dev.langchain4j.agent.tool.ToolExecutionRequest;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import org.json.JSONException;
import org.junit.jupiter.api.Test;
import org.skyscreamer.jsonassert.JSONAssert;
import org.skyscreamer.jsonassert.JSONCompareMode;
import io.thomasvitale.langchain4j.spring.core.json.jackson.LangChain4jJacksonProvider;
import static org.assertj.core.api.Assertions.assertThat;
/**
* Unit tests for {@link AiMessageMixin}.
*/
class AiMessageMixinTests {
private final ObjectMapper objectMapper = LangChain4jJacksonProvider.getObjectMapper();
@Test
void serializeAndDeserializeAiMessageWithText() throws JsonProcessingException, JSONException {
var message = AiMessage.from("Simple answer");
var json = objectMapper.writeValueAsString(message);
JSONAssert.assertEquals("""
{
"text": "Simple answer",
"type": "AI"
}
""", json, JSONCompareMode.STRICT);
var deserializedMessage = objectMapper.readValue(json, ChatMessage.class);
assertThat(deserializedMessage).isEqualTo(message);
}
@Test
void serializeAndDeserializeAiMessageWithToolExecutionRequest() throws JsonProcessingException, JSONException {
var message = AiMessage.from(ToolExecutionRequest.builder().name("queryDatabase").arguments("{}").build());
var json = objectMapper.writeValueAsString(message);
JSONAssert.assertEquals("""
{
"toolExecutionRequests": [{
"name": "queryDatabase",
"arguments": "{}"
}],
"type": "AI"
}
""", json, JSONCompareMode.STRICT);
var deserializedMessage = objectMapper.readValue(json, ChatMessage.class);
assertThat(deserializedMessage).isEqualTo(message);
}
}
| [
"dev.langchain4j.agent.tool.ToolExecutionRequest.builder"
] | [((1581, 1657), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((1581, 1649), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((1581, 1633), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder')] |
package io.quarkiverse.langchain4j.test;
import static dev.langchain4j.data.message.AiMessage.aiMessage;
import static dev.langchain4j.data.message.ChatMessageDeserializer.messageFromJson;
import static dev.langchain4j.data.message.ChatMessageDeserializer.messagesFromJson;
import static dev.langchain4j.data.message.ChatMessageSerializer.messageToJson;
import static dev.langchain4j.data.message.ChatMessageSerializer.messagesToJson;
import static dev.langchain4j.data.message.SystemMessage.systemMessage;
import static dev.langchain4j.data.message.ToolExecutionResultMessage.toolExecutionResultMessage;
import static dev.langchain4j.data.message.UserMessage.userMessage;
import static java.util.Arrays.asList;
import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.List;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;
import dev.langchain4j.agent.tool.ToolExecutionRequest;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.ChatMessageSerializer;
import dev.langchain4j.data.message.ImageContent;
import dev.langchain4j.data.message.UserMessage;
import io.quarkus.test.QuarkusUnitTest;
class ChatMessageSerializerTest {
@RegisterExtension
static final QuarkusUnitTest unitTest = new QuarkusUnitTest()
.setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class));
@Test
void should_serialize_and_deserialize_user_message_with_name() {
UserMessage message = userMessage("dummy", "hello");
String json = messageToJson(message);
ChatMessage deserializedMessage = messageFromJson(json);
assertThat(deserializedMessage).isEqualTo(message);
}
@Test
void should_serialize_and_deserialize_user_message_without_name() {
UserMessage message = userMessage("hello");
String json = messageToJson(message);
ChatMessage deserializedMessage = messageFromJson(json);
assertThat(deserializedMessage).isEqualTo(message);
}
@Test
void should_serialize_and_deserialize_user_message_with_image_content() {
UserMessage message = UserMessage.from(ImageContent.from("http://image.url"));
String json = messageToJson(message);
ChatMessage deserializedMessage = messageFromJson(json);
assertThat(deserializedMessage).isEqualTo(message);
}
@Test
void should_serialize_and_deserialize_empty_list() {
List<ChatMessage> messages = emptyList();
String json = messagesToJson(messages);
List<ChatMessage> deserializedMessages = messagesFromJson(json);
assertThat(deserializedMessages).isEmpty();
}
@Test
void should_deserialize_null_as_empty_list() {
assertThat(messagesFromJson(null)).isEmpty();
}
@Test
void should_serialize_and_deserialize_list_with_one_message() {
List<ChatMessage> messages = singletonList(userMessage("hello"));
String json = messagesToJson(messages);
assertThat(json).isEqualTo("[{\"contents\":[{\"text\":\"hello\",\"type\":\"TEXT\"}],\"type\":\"USER\"}]");
List<ChatMessage> deserializedMessages = messagesFromJson(json);
assertThat(deserializedMessages).isEqualTo(messages);
}
@Test
void should_serialize_and_deserialize_list_with_all_types_of_messages() {
List<ChatMessage> messages = asList(
systemMessage("Hello from system"),
userMessage("Hello from user"),
userMessage("Klaus", "Hello from Klaus"),
aiMessage("Hello from AI"),
aiMessage(ToolExecutionRequest.builder()
.name("calculator")
.arguments("{}")
.build()),
toolExecutionResultMessage("12345", "calculator", "4"));
String json = ChatMessageSerializer.messagesToJson(messages);
assertThat(json).isEqualTo("[" +
"{\"text\":\"Hello from system\",\"type\":\"SYSTEM\"}," +
"{\"contents\":[{\"text\":\"Hello from user\",\"type\":\"TEXT\"}],\"type\":\"USER\"}," +
"{\"name\":\"Klaus\",\"contents\":[{\"text\":\"Hello from Klaus\",\"type\":\"TEXT\"}],\"type\":\"USER\"}," +
"{\"text\":\"Hello from AI\",\"type\":\"AI\"}," +
"{\"toolExecutionRequests\":[{\"name\":\"calculator\",\"arguments\":\"{}\"}],\"type\":\"AI\"}," +
"{\"text\":\"4\",\"id\":\"12345\",\"toolName\":\"calculator\",\"type\":\"TOOL_EXECUTION_RESULT\"}" +
"]");
List<ChatMessage> deserializedMessages = messagesFromJson(json);
assertThat(deserializedMessages).isEqualTo(messages);
}
}
| [
"dev.langchain4j.agent.tool.ToolExecutionRequest.builder"
] | [((3823, 3971), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((3823, 3938), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((3823, 3897), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder')] |
package io.thomasvitale.langchain4j.spring.core.image.jackson;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import dev.langchain4j.data.image.Image;
import org.json.JSONException;
import org.junit.jupiter.api.Test;
import org.skyscreamer.jsonassert.JSONAssert;
import org.skyscreamer.jsonassert.JSONCompareMode;
import io.thomasvitale.langchain4j.spring.core.json.jackson.LangChain4jJacksonProvider;
import java.util.Base64;
import static org.assertj.core.api.Assertions.assertThat;
/**
* Unit tests for {@link ImageMixin}.
*/
class ImageMixinTests {
private final ObjectMapper objectMapper = LangChain4jJacksonProvider.getObjectMapper();
@Test
void serializeAndDeserializeImageWithUrl() throws JsonProcessingException, JSONException {
var image = Image.builder().url("http://example.net").revisedPrompt("something funny").build();
var json = objectMapper.writeValueAsString(image);
JSONAssert.assertEquals("""
{
"url": "http://example.net",
"revisedPrompt": "something funny"
}
""", json, JSONCompareMode.STRICT);
var deserializedImage = objectMapper.readValue(json, Image.class);
assertThat(deserializedImage).isEqualTo(image);
}
@Test
void serializeAndDeserializeImageWithBase64AndMimeType() throws JsonProcessingException, JSONException {
var image = Image.builder()
.base64Data(Base64.getEncoder().encodeToString("image".getBytes()))
.mimeType("img/png")
.build();
var json = objectMapper.writeValueAsString(image);
JSONAssert.assertEquals("""
{
"base64Data": "aW1hZ2U=",
"mimeType": "img/png"
}
""", json, JSONCompareMode.STRICT);
var deserializedImage = objectMapper.readValue(json, Image.class);
assertThat(deserializedImage).isEqualTo(image);
}
}
| [
"dev.langchain4j.data.image.Image.builder"
] | [((845, 927), 'dev.langchain4j.data.image.Image.builder'), ((845, 919), 'dev.langchain4j.data.image.Image.builder'), ((845, 886), 'dev.langchain4j.data.image.Image.builder'), ((1512, 1661), 'dev.langchain4j.data.image.Image.builder'), ((1512, 1640), 'dev.langchain4j.data.image.Image.builder'), ((1512, 1607), 'dev.langchain4j.data.image.Image.builder'), ((1552, 1606), 'java.util.Base64.getEncoder')] |
package com.egineering.ai.llmjavademo;
import com.egineering.ai.llmjavademo.agents.FaqAgent;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.parser.apache.pdfbox.ApachePdfBoxDocumentParser;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.StreamingChatLanguageModel;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.BertTokenizer;
import dev.langchain4j.model.ollama.OllamaStreamingChatModel;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import org.junit.jupiter.api.Test;
import org.springframework.util.ResourceUtils;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Field;
import java.lang.reflect.Proxy;
import static dev.langchain4j.data.document.loader.FileSystemDocumentLoader.loadDocument;
public class Tests {
@Test
public void test() throws IOException {
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
File fileResource = ResourceUtils.getFile("classpath:jackson_lottery.pdf");
Document document = loadDocument(fileResource.toPath(), new ApachePdfBoxDocumentParser());
DocumentSplitter documentSplitter = DocumentSplitters.recursive(100, 2, new BertTokenizer());
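        // Ingestion pipeline: split the PDF into segments of at most 100 tokens
        // (with an overlap of 2), embed each segment with the local MiniLM model,
        // and store the resulting vectors in the in-memory embedding store.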
EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
.documentSplitter(documentSplitter)
.embeddingModel(new AllMiniLmL6V2EmbeddingModel())
.embeddingStore(embeddingStore)
.build();
ingestor.ingest(document);
}
@Test
public void test2() throws NoSuchFieldException, IllegalAccessException {
StreamingChatLanguageModel model = OllamaStreamingChatModel.builder()
.baseUrl("http://localhost:11434")
.modelName("llama2")
.temperature(0.0)
.build();
FaqAgent faqAgent = AiServices.builder(FaqAgent.class)
.streamingChatLanguageModel(model)
.chatMemory(MessageWindowChatMemory.withMaxMessages(20))
.build();
        // Read the private "context" field from the generated proxy's invocation handler
        Object invocationHandler = Proxy.getInvocationHandler(faqAgent);
        Field defaultAiServiceField = invocationHandler.getClass().getDeclaredField("context");
        defaultAiServiceField.setAccessible(true);
        Object defaultAiServices = defaultAiServiceField.get(invocationHandler);
}
}
| [
"dev.langchain4j.service.AiServices.builder",
"dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder",
"dev.langchain4j.model.ollama.OllamaStreamingChatModel.builder"
] | [((1713, 1937), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1713, 1912), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1713, 1864), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1713, 1797), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2112, 2293), 'dev.langchain4j.model.ollama.OllamaStreamingChatModel.builder'), ((2112, 2268), 'dev.langchain4j.model.ollama.OllamaStreamingChatModel.builder'), ((2112, 2234), 'dev.langchain4j.model.ollama.OllamaStreamingChatModel.builder'), ((2112, 2197), 'dev.langchain4j.model.ollama.OllamaStreamingChatModel.builder'), ((2324, 2507), 'dev.langchain4j.service.AiServices.builder'), ((2324, 2482), 'dev.langchain4j.service.AiServices.builder'), ((2324, 2409), 'dev.langchain4j.service.AiServices.builder'), ((2548, 2623), 'java.lang.reflect.Proxy.getInvocationHandler'), ((2548, 2595), 'java.lang.reflect.Proxy.getInvocationHandler')] |
package org.feuyeux.ai.langchain.hellolangchain;
import static dev.langchain4j.data.document.loader.FileSystemDocumentLoader.loadDocument;
import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO;
import static java.time.Duration.ofSeconds;
import static java.util.stream.Collectors.joining;
import static org.feuyeux.ai.langchain.hellolangchain.OpenApi.getKey;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.parser.TextDocumentParser;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.input.Prompt;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiTokenizer;
import dev.langchain4j.store.embedding.EmbeddingMatch;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
@Slf4j
public class RetrievalTest {
public static final String SIMPSON_S_ADVENTURES_TXT =
"src/test/resources/simpson's_adventures.txt";
@AfterEach
public void tearDown() throws InterruptedException {
TimeUnit.SECONDS.sleep(25);
}
@Test
public void givenDocument_whenPrompted_thenValidResponse() {
Document document = loadDocument(Paths.get(SIMPSON_S_ADVENTURES_TXT), new TextDocumentParser());
DocumentSplitter splitter =
DocumentSplitters.recursive(100, 0, new OpenAiTokenizer(GPT_3_5_TURBO));
List<TextSegment> segments = splitter.split(document);
EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel();
List<Embedding> embeddings = embeddingModel.embedAll(segments).content();
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
embeddingStore.addAll(embeddings, segments);
String question = "Who is Simpson?";
Embedding questionEmbedding = embeddingModel.embed(question).content();
int maxResults = 3;
double minScore = 0.7;
List<EmbeddingMatch<TextSegment>> relevantEmbeddings =
embeddingStore.findRelevant(questionEmbedding, maxResults, minScore);
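    // "Prompt stuffing": the text of the most relevant segments is concatenated and
    // injected into the {{information}} variable of the prompt template below, so
    // the model answers based on the retrieved document rather than its own memory.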
PromptTemplate promptTemplate =
PromptTemplate.from(
"Answer the following question to the best of your ability:\n"
+ "\n"
+ "Question:\n"
+ "{{question}}\n"
+ "\n"
+ "Base your answer on the following information:\n"
+ "{{information}}");
String information =
relevantEmbeddings.stream().map(match -> match.embedded().text()).collect(joining("\n\n"));
Map<String, Object> variables = new HashMap<>();
variables.put("question", question);
variables.put("information", information);
Prompt prompt = promptTemplate.apply(variables);
ChatLanguageModel chatModel =
OpenAiChatModel.builder().apiKey(getKey()).timeout(ofSeconds(60)).build();
AiMessage aiMessage = chatModel.generate(prompt.toUserMessage()).content();
log.info(aiMessage.text());
Assertions.assertNotNull(aiMessage.text());
}
}
| [
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((1821, 1847), 'java.util.concurrent.TimeUnit.SECONDS.sleep'), ((3509, 3582), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3509, 3574), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3509, 3551), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')] |
package org.feuyeux.ai.langchain.hellolangchain;
import static org.assertj.core.api.Assertions.assertThat;
import static org.feuyeux.ai.langchain.hellolangchain.OpenApi.getKey;
import dev.langchain4j.agent.tool.Tool;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.service.AiServices;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;
@Slf4j
public class AgentsTest {
static class Calculator {
@Tool("Calculates the length of a string")
int stringLength(String s) throws InterruptedException {
log.info("Calculating the length of \"{}\"...", s);
TimeUnit.SECONDS.sleep(15);
return s.length();
}
@Tool("Calculates the sum of two numbers")
int add(int a, int b) {
return a + b;
}
}
interface Assistant {
String chat(String userMessage);
}
@AfterEach
public void tearDown() throws InterruptedException {
TimeUnit.SECONDS.sleep(25);
}
@Test
public void givenServiceWithTools_whenPrompted_thenValidResponse() throws InterruptedException {
Assistant assistant =
AiServices.builder(Assistant.class)
.chatLanguageModel(OpenAiChatModel.withApiKey(getKey()))
.tools(new Calculator())
.chatMemory(MessageWindowChatMemory.withMaxMessages(10))
.build();
String question =
"What is the sum of the numbers of letters in the words \"language\" and \"model\"?";
String answer = assistant.chat(question);
log.info("answer:{}", answer);
assertThat(answer).contains("13");
}
}
| [
"dev.langchain4j.service.AiServices.builder"
] | [((756, 782), 'java.util.concurrent.TimeUnit.SECONDS.sleep'), ((1060, 1086), 'java.util.concurrent.TimeUnit.SECONDS.sleep'), ((1234, 1465), 'dev.langchain4j.service.AiServices.builder'), ((1234, 1444), 'dev.langchain4j.service.AiServices.builder'), ((1234, 1375), 'dev.langchain4j.service.AiServices.builder'), ((1234, 1338), 'dev.langchain4j.service.AiServices.builder')] |
package dev.langchain4j.rag.content.retriever;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.rag.query.Query;
import dev.langchain4j.store.embedding.EmbeddingMatch;
import dev.langchain4j.store.embedding.EmbeddingSearchRequest;
import dev.langchain4j.store.embedding.EmbeddingSearchResult;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.filter.Filter;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import static dev.langchain4j.store.embedding.filter.MetadataFilterBuilder.metadataKey;
import static java.util.Arrays.asList;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.*;
class EmbeddingStoreContentRetrieverTest {
private static EmbeddingStore<TextSegment> EMBEDDING_STORE;
private static EmbeddingModel EMBEDDING_MODEL;
private static final Embedding EMBEDDING = Embedding.from(asList(1f, 2f, 3f));
private static final Query QUERY = Query.from("query");
private static final int DEFAULT_MAX_RESULTS = 3;
private static final int CUSTOM_MAX_RESULTS = 1;
private static final double CUSTOM_MIN_SCORE = 0.7;
public static final double DEFAULT_MIN_SCORE = 0.0;
@BeforeEach
void beforeEach() {
EMBEDDING_STORE = mock(EmbeddingStore.class);
when(EMBEDDING_STORE.search(any())).thenReturn(new EmbeddingSearchResult<>(asList(
new EmbeddingMatch<>(0.9, "id 1", null, TextSegment.from("content 1")),
new EmbeddingMatch<>(0.7, "id 2", null, TextSegment.from("content 2"))
)));
EMBEDDING_MODEL = mock(EmbeddingModel.class);
when(EMBEDDING_MODEL.embed(anyString())).thenReturn(Response.from(EMBEDDING));
}
@AfterEach
void afterEach() {
verify(EMBEDDING_MODEL).embed(QUERY.text());
verifyNoMoreInteractions(EMBEDDING_MODEL);
}
@Test
void should_retrieve() {
// given
ContentRetriever contentRetriever = new EmbeddingStoreContentRetriever(EMBEDDING_STORE, EMBEDDING_MODEL);
// when
contentRetriever.retrieve(QUERY);
// then
verify(EMBEDDING_STORE).search(EmbeddingSearchRequest.builder()
.queryEmbedding(EMBEDDING)
.maxResults(DEFAULT_MAX_RESULTS)
.minScore(DEFAULT_MIN_SCORE)
.build());
verifyNoMoreInteractions(EMBEDDING_STORE);
}
@Test
void should_retrieve_builder() {
// given
ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder()
.embeddingStore(EMBEDDING_STORE)
.embeddingModel(EMBEDDING_MODEL)
.build();
// when
contentRetriever.retrieve(QUERY);
// then
verify(EMBEDDING_STORE).search(EmbeddingSearchRequest.builder()
.queryEmbedding(EMBEDDING)
.maxResults(DEFAULT_MAX_RESULTS)
.minScore(DEFAULT_MIN_SCORE)
.build());
verifyNoMoreInteractions(EMBEDDING_STORE);
}
@Test
void should_retrieve_with_custom_maxResults() {
// given
ContentRetriever contentRetriever = new EmbeddingStoreContentRetriever(
EMBEDDING_STORE,
EMBEDDING_MODEL,
CUSTOM_MAX_RESULTS
);
// when
contentRetriever.retrieve(QUERY);
// then
verify(EMBEDDING_STORE).search(EmbeddingSearchRequest.builder()
.queryEmbedding(EMBEDDING)
.maxResults(CUSTOM_MAX_RESULTS)
.minScore(DEFAULT_MIN_SCORE)
.build());
verifyNoMoreInteractions(EMBEDDING_STORE);
}
@Test
void should_retrieve_with_custom_maxResults_builder() {
// given
ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder()
.embeddingStore(EMBEDDING_STORE)
.embeddingModel(EMBEDDING_MODEL)
.maxResults(CUSTOM_MAX_RESULTS)
.build();
// when
contentRetriever.retrieve(QUERY);
// then
verify(EMBEDDING_STORE).search(EmbeddingSearchRequest.builder()
.queryEmbedding(EMBEDDING)
.maxResults(CUSTOM_MAX_RESULTS)
.minScore(DEFAULT_MIN_SCORE)
.build());
verifyNoMoreInteractions(EMBEDDING_STORE);
}
@Test
void should_retrieve_with_custom_dynamicMaxResults_builder() {
// given
ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder()
.embeddingStore(EMBEDDING_STORE)
.embeddingModel(EMBEDDING_MODEL)
.dynamicMaxResults((query) -> CUSTOM_MAX_RESULTS)
.build();
// when
contentRetriever.retrieve(QUERY);
// then
verify(EMBEDDING_STORE).search(EmbeddingSearchRequest.builder()
.queryEmbedding(EMBEDDING)
.maxResults(CUSTOM_MAX_RESULTS)
.minScore(DEFAULT_MIN_SCORE)
.build());
verifyNoMoreInteractions(EMBEDDING_STORE);
}
@Test
void should_retrieve_with_custom_minScore_ctor() {
// given
ContentRetriever contentRetriever = new EmbeddingStoreContentRetriever(
EMBEDDING_STORE,
EMBEDDING_MODEL,
null,
CUSTOM_MIN_SCORE
);
// when
contentRetriever.retrieve(QUERY);
// then
verify(EMBEDDING_STORE).search(EmbeddingSearchRequest.builder()
.queryEmbedding(EMBEDDING)
.maxResults(DEFAULT_MAX_RESULTS)
.minScore(CUSTOM_MIN_SCORE)
.build());
verifyNoMoreInteractions(EMBEDDING_STORE);
}
@Test
void should_retrieve_with_custom_minScore_builder() {
// given
ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder()
.embeddingStore(EMBEDDING_STORE)
.embeddingModel(EMBEDDING_MODEL)
.minScore(CUSTOM_MIN_SCORE)
.build();
// when
contentRetriever.retrieve(QUERY);
// then
verify(EMBEDDING_STORE).search(EmbeddingSearchRequest.builder()
.queryEmbedding(EMBEDDING)
.maxResults(DEFAULT_MAX_RESULTS)
.minScore(CUSTOM_MIN_SCORE)
.build());
verifyNoMoreInteractions(EMBEDDING_STORE);
}
@Test
void should_retrieve_with_custom_dynamicMinScore_builder() {
// given
ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder()
.embeddingStore(EMBEDDING_STORE)
.embeddingModel(EMBEDDING_MODEL)
.dynamicMinScore((query) -> CUSTOM_MIN_SCORE)
.build();
// when
contentRetriever.retrieve(QUERY);
// then
verify(EMBEDDING_STORE).search(EmbeddingSearchRequest.builder()
.queryEmbedding(EMBEDDING)
.maxResults(DEFAULT_MAX_RESULTS)
.minScore(CUSTOM_MIN_SCORE)
.build());
verifyNoMoreInteractions(EMBEDDING_STORE);
}
@Test
void should_retrieve_with_custom_filter() {
// given
Filter metadataFilter = metadataKey("key").isEqualTo("value");
ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder()
.embeddingStore(EMBEDDING_STORE)
.embeddingModel(EMBEDDING_MODEL)
.filter(metadataFilter)
.build();
// when
contentRetriever.retrieve(QUERY);
// then
verify(EMBEDDING_STORE).search(EmbeddingSearchRequest.builder()
.queryEmbedding(EMBEDDING)
.maxResults(DEFAULT_MAX_RESULTS)
.minScore(DEFAULT_MIN_SCORE)
.filter(metadataFilter)
.build());
verifyNoMoreInteractions(EMBEDDING_STORE);
}
@Test
void should_retrieve_with_custom_dynamicFilter() {
// given
Filter metadataFilter = metadataKey("key").isEqualTo("value");
ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder()
.embeddingStore(EMBEDDING_STORE)
.embeddingModel(EMBEDDING_MODEL)
.dynamicFilter((query) -> metadataFilter)
.build();
// when
contentRetriever.retrieve(QUERY);
// then
verify(EMBEDDING_STORE).search(EmbeddingSearchRequest.builder()
.queryEmbedding(EMBEDDING)
.maxResults(DEFAULT_MAX_RESULTS)
.minScore(DEFAULT_MIN_SCORE)
.filter(metadataFilter)
.build());
verifyNoMoreInteractions(EMBEDDING_STORE);
}
}
| [
"dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder"
] | [((2443, 2637), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((2443, 2612), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((2443, 2567), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((2443, 2518), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((3087, 3281), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((3087, 3256), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((3087, 3211), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((3087, 3162), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((3729, 3922), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((3729, 3897), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((3729, 3852), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((3729, 3804), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((4443, 4636), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((4443, 4611), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((4443, 4566), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((4443, 4518), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((5182, 5375), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((5182, 5350), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((5182, 5305), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((5182, 5257), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((5846, 6039), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((5846, 6014), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((5846, 5970), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((5846, 5921), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((6554, 6747), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((6554, 6722), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((6554, 6678), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((6554, 6629), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((7287, 7480), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((7287, 7455), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((7287, 7411), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((7287, 7362), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((8053, 8287), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((8053, 8262), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((8053, 8222), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((8053, 8177), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((8053, 8128), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((8885, 9119), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((8885, 9094), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((8885, 9054), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((8885, 9009), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder'), ((8885, 8960), 'dev.langchain4j.store.embedding.EmbeddingSearchRequest.builder')] |
package dev.langchain4j.model.chat;
import dev.langchain4j.agent.tool.ToolSpecification;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.model.output.Response;
import org.assertj.core.api.WithAssertions;
import org.junit.jupiter.api.Test;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
class ChatLanguageModelTest implements WithAssertions {
public static class UpperCaseEchoModel implements ChatLanguageModel {
@Override
public Response<AiMessage> generate(List<ChatMessage> messages) {
ChatMessage lastMessage = messages.get(messages.size() - 1);
return new Response<>(new AiMessage(lastMessage.text().toUpperCase(Locale.ROOT)));
}
}
@Test
public void test_not_supported() {
ChatLanguageModel model = new UpperCaseEchoModel();
List<ChatMessage> messages = new ArrayList<>();
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> model.generate(messages, new ArrayList<>()))
.withMessageContaining("Tools are currently not supported by this model");
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> model.generate(messages, ToolSpecification.builder().name("foo").build()))
.withMessageContaining("Tools are currently not supported by this model");
}
@Test
public void test_generate() {
ChatLanguageModel model = new UpperCaseEchoModel();
assertThat(model.generate("how are you?"))
.isEqualTo("HOW ARE YOU?");
{
List<ChatMessage> messages = new ArrayList<>();
messages.add(new UserMessage("Hello"));
messages.add(new AiMessage("Hi"));
messages.add(new UserMessage("How are you?"));
Response<AiMessage> response = model.generate(messages);
assertThat(response.content().text()).isEqualTo("HOW ARE YOU?");
assertThat(response.tokenUsage()).isNull();
assertThat(response.finishReason()).isNull();
}
{
Response<AiMessage> response = model.generate(
new UserMessage("Hello"),
new AiMessage("Hi"),
new UserMessage("How are you?"));
assertThat(response.content().text()).isEqualTo("HOW ARE YOU?");
assertThat(response.tokenUsage()).isNull();
assertThat(response.finishReason()).isNull();
}
}
}
| [
"dev.langchain4j.agent.tool.ToolSpecification.builder"
] | [((1374, 1421), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((1374, 1413), 'dev.langchain4j.agent.tool.ToolSpecification.builder')] |
package dev.langchain4j.store.embedding.filter.builder.sql;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.rag.query.Query;
import dev.langchain4j.store.embedding.filter.Filter;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import java.time.LocalDate;
import java.util.stream.Stream;
import static dev.langchain4j.store.embedding.filter.MetadataFilterBuilder.metadataKey;
import static java.util.Arrays.asList;
import static org.assertj.core.api.Assertions.assertThat;
class LanguageModelSqlFilterBuilderIT {
private static final String OLLAMA_BASE_URL = "http://localhost:11434";
private static final int OLLAMA_NUM_PREDICT = 25;
TableDefinition table = new TableDefinition(
"movies",
"",
asList(
new ColumnDefinition("name", "VARCHAR(50)", ""),
new ColumnDefinition("genre", "VARCHAR(50)", "one of: [comedy, drama, action]"),
new ColumnDefinition("year", "INTEGER", "")
)
);
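    // LanguageModelSqlFilterBuilder prompts the chat model with the table definition
    // above, has it generate a SQL WHERE clause for the natural-language query, and
    // parses that clause into a langchain4j metadata Filter.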
@ParameterizedTest
@MethodSource("models")
void should_filter_by_genre(ChatLanguageModel model) {
// given
LanguageModelSqlFilterBuilder sqlFilterBuilder = new LanguageModelSqlFilterBuilder(model, table);
Query query = Query.from("I want to watch something funny");
// when
Filter filter = sqlFilterBuilder.build(query);
// then
assertThat(filter).isEqualTo(metadataKey("genre").isEqualTo("comedy"));
}
@ParameterizedTest
@MethodSource("models")
void should_filter_by_genre_and_year(ChatLanguageModel model) {
// given
LanguageModelSqlFilterBuilder sqlFilterBuilder = LanguageModelSqlFilterBuilder.builder()
.chatLanguageModel(model)
.tableDefinition(table)
.build();
Query query = Query.from("I want to watch drama from current year");
// when
Filter filter = sqlFilterBuilder.build(query);
// then
assertThat(filter).isEqualTo(metadataKey("genre").isEqualTo("drama").and(metadataKey("year").isEqualTo((long) LocalDate.now().getYear())));
}
@ParameterizedTest
@MethodSource("models")
void should_filter_by_year_range(ChatLanguageModel model) {
// given
LanguageModelSqlFilterBuilder sqlFilterBuilder = new LanguageModelSqlFilterBuilder(model, table);
Query query = Query.from("I want to watch some old movie from 90s");
// when
Filter filter = sqlFilterBuilder.build(query);
// then
assertThat(filter).isEqualTo(metadataKey("year").isGreaterThanOrEqualTo(1990L).and(metadataKey("year").isLessThanOrEqualTo(1999L)));
}
@ParameterizedTest
@MethodSource("models")
void should_filter_by_year_using_arithmetics(ChatLanguageModel model) {
// given
LanguageModelSqlFilterBuilder sqlFilterBuilder = new LanguageModelSqlFilterBuilder(model, table);
Query query = Query.from("I want to watch some recent movie from the previous year");
// when
Filter filter = sqlFilterBuilder.build(query);
// then
assertThat(filter).isEqualTo(metadataKey("year").isEqualTo((long) LocalDate.now().getYear() - 1));
}
static Stream<Arguments> models() {
return Stream.of(
Arguments.of(
OpenAiChatModel.builder()
.baseUrl(System.getenv("OPENAI_BASE_URL"))
.apiKey(System.getenv("OPENAI_API_KEY"))
.organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
.logRequests(true)
.logResponses(true)
.build()
)
// Arguments.of(
// OllamaChatModel.builder()
// .baseUrl(OLLAMA_BASE_URL)
// .modelName("sqlcoder")
// .numPredict(OLLAMA_NUM_PREDICT)
// .build()
// ),
// Arguments.of(
// OllamaChatModel.builder()
// .baseUrl(OLLAMA_BASE_URL)
// .modelName("codellama")
// .numPredict(OLLAMA_NUM_PREDICT)
// .build()
// ),
// Arguments.of(
// OllamaChatModel.builder()
// .baseUrl(OLLAMA_BASE_URL)
// .modelName("mistral")
// .numPredict(OLLAMA_NUM_PREDICT)
// .build()
// ),
// Arguments.of(
// OllamaChatModel.builder()
// .baseUrl(OLLAMA_BASE_URL)
// .modelName("llama2")
// .numPredict(OLLAMA_NUM_PREDICT)
// .build()
// ),
// Arguments.of(
// OllamaChatModel.builder()
// .baseUrl(OLLAMA_BASE_URL)
// .modelName("phi")
// .numPredict(OLLAMA_NUM_PREDICT)
// .build()
// )
);
}
}
| [
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((2309, 2334), 'java.time.LocalDate.now'), ((3409, 3434), 'java.time.LocalDate.now'), ((3569, 3975), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3569, 3934), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3569, 3882), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3569, 3831), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3569, 3742), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((3569, 3669), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')] |
package dev.langchain4j.store.memory.chat.cassandra;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.memory.chat.TokenWindowChatMemory;
import dev.langchain4j.model.openai.OpenAiTokenizer;
import lombok.extern.slf4j.Slf4j;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.MethodOrderer;
import org.junit.jupiter.api.Order;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestMethodOrder;
import java.util.UUID;
import static dev.langchain4j.data.message.AiMessage.aiMessage;
import static dev.langchain4j.data.message.UserMessage.userMessage;
import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO;
import static org.assertj.core.api.Assertions.assertThat;
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
@Slf4j
abstract class CassandraChatMemoryStoreTestSupport {
protected final String KEYSPACE = "langchain4j";
protected static CassandraChatMemoryStore chatMemoryStore;
@Test
@Order(1)
@DisplayName("1. Should create a database")
void shouldInitializeDatabase() {
createDatabase();
}
@Test
@Order(2)
@DisplayName("2. Connection to the database")
void shouldConnectToDatabase() {
chatMemoryStore = createChatMemoryStore();
log.info("Chat memory store is created.");
// Connection to Cassandra is established
assertThat(chatMemoryStore.getCassandraSession()
.getMetadata()
.getKeyspace(KEYSPACE)).isPresent();
log.info("Chat memory table is present.");
}
@Test
@Order(3)
@DisplayName("3. ChatMemoryStore initialization (table)")
void shouldCreateChatMemoryStore() {
chatMemoryStore.create();
// Table exists
assertThat(chatMemoryStore.getCassandraSession()
.refreshSchema()
.getKeyspace(KEYSPACE).get()
.getTable(CassandraChatMemoryStore.DEFAULT_TABLE_NAME)).isPresent();
chatMemoryStore.clear();
}
@Test
@Order(4)
@DisplayName("4. Insert items")
void shouldInsertItems() {
        // Given
String chatSessionId = "chat-" + UUID.randomUUID();
ChatMemory chatMemory = MessageWindowChatMemory.builder()
.chatMemoryStore(chatMemoryStore)
.maxMessages(100)
.id(chatSessionId)
.build();
// When
UserMessage userMessage = userMessage("I will ask you a few question about ff4j.");
chatMemory.add(userMessage);
AiMessage aiMessage = aiMessage("Sure, go ahead!");
chatMemory.add(aiMessage);
// Then
assertThat(chatMemory.messages()).containsExactly(userMessage, aiMessage);
}
abstract void createDatabase();
abstract CassandraChatMemoryStore createChatMemoryStore();
}
| [
"dev.langchain4j.memory.chat.MessageWindowChatMemory.builder"
] | [((2372, 2549), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((2372, 2524), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((2372, 2489), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((2372, 2455), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder')] |
package dev.langchain4j.model.openai;
import dev.langchain4j.agent.tool.ToolExecutionRequest;
import dev.langchain4j.agent.tool.ToolSpecification;
import dev.langchain4j.data.message.*;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.model.output.TokenUsage;
import org.junit.jupiter.api.Test;
import java.util.Base64;
import java.util.List;
import static dev.langchain4j.agent.tool.JsonSchemaProperty.INTEGER;
import static dev.langchain4j.data.message.ToolExecutionResultMessage.from;
import static dev.langchain4j.data.message.UserMessage.userMessage;
import static dev.langchain4j.internal.Utils.readBytes;
import static dev.langchain4j.model.openai.OpenAiChatModelName.GPT_3_5_TURBO;
import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO_1106;
import static dev.langchain4j.model.openai.OpenAiModelName.GPT_4_VISION_PREVIEW;
import static dev.langchain4j.model.output.FinishReason.*;
import static java.util.Arrays.asList;
import static java.util.Collections.singletonList;
import static org.assertj.core.api.Assertions.assertThat;
class OpenAiChatModelIT {
static final String CAT_IMAGE_URL = "https://upload.wikimedia.org/wikipedia/commons/e/e9/Felis_silvestris_silvestris_small_gradual_decrease_of_quality.png";
static final String DICE_IMAGE_URL = "https://upload.wikimedia.org/wikipedia/commons/4/47/PNG_transparency_demonstration_1.png";
ToolSpecification calculator = ToolSpecification.builder()
.name("calculator")
.description("returns a sum of two numbers")
.addParameter("first", INTEGER)
.addParameter("second", INTEGER)
.build();
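    // The ToolSpecification above is what the model "sees": a function named
    // "calculator" with two integer parameters. The model never executes the tool
    // itself; it only returns a ToolExecutionRequest with the arguments to use.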
ChatLanguageModel model = OpenAiChatModel.builder()
.baseUrl(System.getenv("OPENAI_BASE_URL"))
.apiKey(System.getenv("OPENAI_API_KEY"))
.organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
.temperature(0.0)
.logRequests(true)
.logResponses(true)
.build();
ChatLanguageModel visionModel = OpenAiChatModel.builder()
.baseUrl(System.getenv("OPENAI_BASE_URL"))
.apiKey(System.getenv("OPENAI_API_KEY"))
.organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
.modelName(GPT_4_VISION_PREVIEW)
.temperature(0.0)
.logRequests(true)
.logResponses(true)
.build();
@Test
void should_generate_answer_and_return_token_usage_and_finish_reason_stop() {
// given
UserMessage userMessage = userMessage("What is the capital of Germany?");
// when
Response<AiMessage> response = model.generate(userMessage);
// then
assertThat(response.content().text()).contains("Berlin");
TokenUsage tokenUsage = response.tokenUsage();
assertThat(tokenUsage.inputTokenCount()).isEqualTo(14);
assertThat(tokenUsage.outputTokenCount()).isGreaterThan(0);
assertThat(tokenUsage.totalTokenCount())
.isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount());
assertThat(response.finishReason()).isEqualTo(STOP);
}
@Test
void should_generate_answer_and_return_token_usage_and_finish_reason_length() {
// given
ChatLanguageModel model = OpenAiChatModel.builder()
.baseUrl(System.getenv("OPENAI_BASE_URL"))
.apiKey(System.getenv("OPENAI_API_KEY"))
.organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
.maxTokens(3)
.build();
UserMessage userMessage = userMessage("What is the capital of Germany?");
// when
Response<AiMessage> response = model.generate(userMessage);
// then
assertThat(response.content().text()).isNotBlank();
TokenUsage tokenUsage = response.tokenUsage();
assertThat(tokenUsage.inputTokenCount()).isEqualTo(14);
assertThat(tokenUsage.outputTokenCount()).isEqualTo(3);
assertThat(tokenUsage.totalTokenCount())
.isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount());
assertThat(response.finishReason()).isEqualTo(LENGTH);
}
@Test
void should_execute_a_tool_then_answer() {
// given
UserMessage userMessage = userMessage("2+2=?");
List<ToolSpecification> toolSpecifications = singletonList(calculator);
// when
Response<AiMessage> response = model.generate(singletonList(userMessage), toolSpecifications);
// then
AiMessage aiMessage = response.content();
assertThat(aiMessage.text()).isNull();
assertThat(aiMessage.toolExecutionRequests()).hasSize(1);
ToolExecutionRequest toolExecutionRequest = aiMessage.toolExecutionRequests().get(0);
assertThat(toolExecutionRequest.id()).isNotBlank();
assertThat(toolExecutionRequest.name()).isEqualTo("calculator");
assertThat(toolExecutionRequest.arguments()).isEqualToIgnoringWhitespace("{\"first\": 2, \"second\": 2}");
TokenUsage tokenUsage = response.tokenUsage();
assertThat(tokenUsage.inputTokenCount()).isEqualTo(52);
assertThat(tokenUsage.outputTokenCount()).isEqualTo(18);
assertThat(tokenUsage.totalTokenCount())
.isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount());
assertThat(response.finishReason()).isEqualTo(TOOL_EXECUTION);
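// The model asked for a tool call instead of answering. The caller now executes the
// tool itself and feeds the result back as a ToolExecutionResultMessage so the model
// can produce the final answer.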
// given
ToolExecutionResultMessage toolExecutionResultMessage = from(toolExecutionRequest, "4");
List<ChatMessage> messages = asList(userMessage, aiMessage, toolExecutionResultMessage);
// when
Response<AiMessage> secondResponse = model.generate(messages);
// then
AiMessage secondAiMessage = secondResponse.content();
assertThat(secondAiMessage.text()).contains("4");
assertThat(secondAiMessage.toolExecutionRequests()).isNull();
TokenUsage secondTokenUsage = secondResponse.tokenUsage();
assertThat(secondTokenUsage.inputTokenCount()).isEqualTo(37);
assertThat(secondTokenUsage.outputTokenCount()).isGreaterThan(0);
assertThat(secondTokenUsage.totalTokenCount())
.isEqualTo(secondTokenUsage.inputTokenCount() + secondTokenUsage.outputTokenCount());
assertThat(secondResponse.finishReason()).isEqualTo(STOP);
}
@Test
void should_execute_tool_forcefully_then_answer() {
// given
UserMessage userMessage = userMessage("2+2=?");
// when
Response<AiMessage> response = model.generate(singletonList(userMessage), calculator);
// then
AiMessage aiMessage = response.content();
assertThat(aiMessage.text()).isNull();
assertThat(aiMessage.toolExecutionRequests()).hasSize(1);
ToolExecutionRequest toolExecutionRequest = aiMessage.toolExecutionRequests().get(0);
assertThat(toolExecutionRequest.id()).isNotBlank();
assertThat(toolExecutionRequest.name()).isEqualTo("calculator");
assertThat(toolExecutionRequest.arguments()).isEqualToIgnoringWhitespace("{\"first\": 2, \"second\": 2}");
TokenUsage tokenUsage = response.tokenUsage();
assertThat(tokenUsage.inputTokenCount()).isEqualTo(61);
assertThat(tokenUsage.outputTokenCount()).isEqualTo(9);
assertThat(tokenUsage.totalTokenCount())
.isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount());
assertThat(response.finishReason()).isEqualTo(STOP); // unclear whether this is an OpenAI bug or whether STOP is the expected finish reason here
// given
ToolExecutionResultMessage toolExecutionResultMessage = from(toolExecutionRequest, "4");
List<ChatMessage> messages = asList(userMessage, aiMessage, toolExecutionResultMessage);
// when
Response<AiMessage> secondResponse = model.generate(messages);
// then
AiMessage secondAiMessage = secondResponse.content();
assertThat(secondAiMessage.text()).contains("4");
assertThat(secondAiMessage.toolExecutionRequests()).isNull();
TokenUsage secondTokenUsage = secondResponse.tokenUsage();
assertThat(secondTokenUsage.inputTokenCount()).isEqualTo(37);
assertThat(secondTokenUsage.outputTokenCount()).isGreaterThan(0);
assertThat(secondTokenUsage.totalTokenCount())
.isEqualTo(secondTokenUsage.inputTokenCount() + secondTokenUsage.outputTokenCount());
assertThat(secondResponse.finishReason()).isEqualTo(STOP);
}
@Test
void should_execute_multiple_tools_in_parallel_then_answer() {
// given
ChatLanguageModel model = OpenAiChatModel.builder()
.baseUrl(System.getenv("OPENAI_BASE_URL"))
.apiKey(System.getenv("OPENAI_API_KEY"))
.organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
.modelName(GPT_3_5_TURBO_1106) // supports parallel function calling
.temperature(0.0)
.build();
UserMessage userMessage = userMessage("2+2=? 3+3=?");
List<ToolSpecification> toolSpecifications = singletonList(calculator);
// when
Response<AiMessage> response = model.generate(singletonList(userMessage), toolSpecifications);
// then
AiMessage aiMessage = response.content();
assertThat(aiMessage.text()).isNull();
assertThat(aiMessage.toolExecutionRequests()).hasSize(2);
ToolExecutionRequest toolExecutionRequest1 = aiMessage.toolExecutionRequests().get(0);
assertThat(toolExecutionRequest1.name()).isEqualTo("calculator");
assertThat(toolExecutionRequest1.arguments()).isEqualToIgnoringWhitespace("{\"first\": 2, \"second\": 2}");
ToolExecutionRequest toolExecutionRequest2 = aiMessage.toolExecutionRequests().get(1);
assertThat(toolExecutionRequest2.name()).isEqualTo("calculator");
assertThat(toolExecutionRequest2.arguments()).isEqualToIgnoringWhitespace("{\"first\": 3, \"second\": 3}");
TokenUsage tokenUsage = response.tokenUsage();
assertThat(tokenUsage.inputTokenCount()).isEqualTo(57);
assertThat(tokenUsage.outputTokenCount()).isEqualTo(51);
assertThat(tokenUsage.totalTokenCount())
.isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount());
assertThat(response.finishReason()).isEqualTo(TOOL_EXECUTION);
// given
ToolExecutionResultMessage toolExecutionResultMessage1 = from(toolExecutionRequest1, "4");
ToolExecutionResultMessage toolExecutionResultMessage2 = from(toolExecutionRequest2, "6");
List<ChatMessage> messages = asList(userMessage, aiMessage, toolExecutionResultMessage1, toolExecutionResultMessage2);
// when
Response<AiMessage> secondResponse = model.generate(messages);
// then
AiMessage secondAiMessage = secondResponse.content();
assertThat(secondAiMessage.text()).contains("4", "6");
assertThat(secondAiMessage.toolExecutionRequests()).isNull();
TokenUsage secondTokenUsage = secondResponse.tokenUsage();
assertThat(secondTokenUsage.inputTokenCount()).isEqualTo(83);
assertThat(secondTokenUsage.outputTokenCount()).isGreaterThan(0);
assertThat(secondTokenUsage.totalTokenCount())
.isEqualTo(secondTokenUsage.inputTokenCount() + secondTokenUsage.outputTokenCount());
assertThat(secondResponse.finishReason()).isEqualTo(STOP);
}
@Test
void should_generate_valid_json() {
//given
String userMessage = "Return JSON with two fields: name and surname of Klaus Heisler. " +
"Before returning, tell me a joke."; // nudging it to say something additionally to json
String expectedJson = "{\"name\": \"Klaus\", \"surname\": \"Heisler\"}";
assertThat(model.generate(userMessage)).isNotEqualToIgnoringWhitespace(expectedJson);
ChatLanguageModel modelGeneratingJson = OpenAiChatModel.builder()
.baseUrl(System.getenv("OPENAI_BASE_URL"))
.apiKey(System.getenv("OPENAI_API_KEY"))
.organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
.modelName(GPT_3_5_TURBO_1106) // supports response_format = 'json_object'
.responseFormat("json_object")
.logRequests(true)
.logResponses(true)
.build();
// when
String json = modelGeneratingJson.generate(userMessage);
// then
assertThat(json).isEqualToIgnoringWhitespace(expectedJson);
}
@Test
void should_accept_image_url() {
// given
ImageContent imageContent = ImageContent.from(CAT_IMAGE_URL);
UserMessage userMessage = UserMessage.from(imageContent);
// when
Response<AiMessage> response = visionModel.generate(userMessage);
// then
assertThat(response.content().text()).containsIgnoringCase("cat");
assertThat(response.tokenUsage().inputTokenCount()).isEqualTo(92);
}
@Test
void should_accept_base64_image() {
// given
String base64Data = Base64.getEncoder().encodeToString(readBytes(CAT_IMAGE_URL));
ImageContent imageContent = ImageContent.from(base64Data, "image/png");
UserMessage userMessage = UserMessage.from(imageContent);
// when
Response<AiMessage> response = visionModel.generate(userMessage);
// then
assertThat(response.content().text()).containsIgnoringCase("cat");
assertThat(response.tokenUsage().inputTokenCount()).isEqualTo(92);
}
@Test
void should_accept_text_and_image() {
// given
UserMessage userMessage = UserMessage.from(
TextContent.from("What do you see? Reply in one word."),
ImageContent.from(CAT_IMAGE_URL)
);
// when
Response<AiMessage> response = visionModel.generate(userMessage);
// then
assertThat(response.content().text()).containsIgnoringCase("cat");
assertThat(response.tokenUsage().inputTokenCount()).isEqualTo(102);
}
@Test
void should_accept_text_and_multiple_images() {
// given
UserMessage userMessage = UserMessage.from(
TextContent.from("What do you see? Reply with one word per image."),
ImageContent.from(CAT_IMAGE_URL),
ImageContent.from(DICE_IMAGE_URL)
);
// when
Response<AiMessage> response = visionModel.generate(userMessage);
// then
assertThat(response.content().text())
.containsIgnoringCase("cat")
.containsIgnoringCase("dice");
assertThat(response.tokenUsage().inputTokenCount()).isEqualTo(189);
}
@Test
void should_accept_text_and_multiple_images_from_different_sources() {
// given
UserMessage userMessage = UserMessage.from(
ImageContent.from(CAT_IMAGE_URL),
ImageContent.from(Base64.getEncoder().encodeToString(readBytes(DICE_IMAGE_URL)), "image/png"),
TextContent.from("What do you see? Reply with one word per image.")
);
// when
Response<AiMessage> response = visionModel.generate(userMessage);
// then
assertThat(response.content().text())
.containsIgnoringCase("cat")
.containsIgnoringCase("dice");
assertThat(response.tokenUsage().inputTokenCount()).isEqualTo(189);
}
@Test
void should_use_enum_as_model_name() {
// given
OpenAiChatModel model = OpenAiChatModel.builder()
.baseUrl(System.getenv("OPENAI_BASE_URL"))
.apiKey(System.getenv("OPENAI_API_KEY"))
.organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
.modelName(GPT_3_5_TURBO)
.logRequests(true)
.logResponses(true)
.build();
// when
String response = model.generate("What is the capital of Germany?");
// then
assertThat(response).containsIgnoringCase("Berlin");
}
}
| [
"dev.langchain4j.agent.tool.ToolSpecification.builder"
] | [((1491, 1717), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((1491, 1696), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((1491, 1651), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((1491, 1607), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((1491, 1550), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((13320, 13380), 'java.util.Base64.getEncoder'), ((15213, 15274), 'java.util.Base64.getEncoder')] |
import dev.langchain4j.chain.ConversationalRetrievalChain;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.parser.TextDocumentParser;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.input.Prompt;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiTokenizer;
import dev.langchain4j.retriever.EmbeddingStoreRetriever;
import dev.langchain4j.store.embedding.EmbeddingMatch;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static dev.langchain4j.data.document.loader.FileSystemDocumentLoader.loadDocument;
import static java.util.stream.Collectors.joining;
public class _12_ChatWithDocumentsExamples {
static class IfYouNeedSimplicity {
public static void main(String[] args) throws Exception {
EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel();
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
.documentSplitter(DocumentSplitters.recursive(300, 0))
.embeddingModel(embeddingModel)
.embeddingStore(embeddingStore)
.build();
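// The ingestor pipelines split -> embed -> store: each ingested document is cut into
// segments, every segment is embedded, and the vectors land in the store above.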
Document document = loadDocument(toPath("story-about-happy-carrot.txt"), new TextDocumentParser());
ingestor.ingest(document);
ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder()
.chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY))
.retriever(EmbeddingStoreRetriever.from(embeddingStore, embeddingModel))
// .chatMemory() // you can override default chat memory
// .promptTemplate() // you can override default prompt template
.build();
String answer = chain.execute("Who is Charlie?");
System.out.println(answer); // Charlie is a cheerful carrot living in VeggieVille...
}
}
static class If_You_Need_More_Control {
public static void main(String[] args) {
// Load the document that includes the information you'd like to "chat" about with the model.
Document document = loadDocument(toPath("story-about-happy-carrot.txt"), new TextDocumentParser());
// Split document into segments 100 tokens each
DocumentSplitter splitter = DocumentSplitters.recursive(
100,
0,
new OpenAiTokenizer("gpt-3.5-turbo")
);
List<TextSegment> segments = splitter.split(document);
// Embed segments (convert them into vectors that represent the meaning) using embedding model
EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel();
List<Embedding> embeddings = embeddingModel.embedAll(segments).content();
// Store embeddings into embedding store for further search / retrieval
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
embeddingStore.addAll(embeddings, segments);
// Specify the question you want to ask the model
String question = "Who is Charlie?";
// Embed the question
Embedding questionEmbedding = embeddingModel.embed(question).content();
// Find relevant embeddings in embedding store by semantic similarity
// You can play with parameters below to find a sweet spot for your specific use case
int maxResults = 3;
double minScore = 0.7;
List<EmbeddingMatch<TextSegment>> relevantEmbeddings
= embeddingStore.findRelevant(questionEmbedding, maxResults, minScore);
// Create a prompt for the model that includes question and relevant embeddings
PromptTemplate promptTemplate = PromptTemplate.from(
"Answer the following question to the best of your ability:\n"
+ "\n"
+ "Question:\n"
+ "{{question}}\n"
+ "\n"
+ "Base your answer on the following information:\n"
+ "{{information}}");
String information = relevantEmbeddings.stream()
.map(match -> match.embedded().text())
.collect(joining("\n\n"));
Map<String, Object> variables = new HashMap<>();
variables.put("question", question);
variables.put("information", information);
Prompt prompt = promptTemplate.apply(variables);
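// The rendered prompt now looks roughly like:
//   Answer the following question to the best of your ability:
//   Question:
//   Who is Charlie?
//   Base your answer on the following information:
//   <the matched segments, joined by blank lines>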
// Send the prompt to the OpenAI chat model
ChatLanguageModel chatModel = OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY);
AiMessage aiMessage = chatModel.generate(prompt.toUserMessage()).content();
// See an answer from the model
String answer = aiMessage.text();
System.out.println(answer); // Charlie is a cheerful carrot living in VeggieVille...
}
}
private static Path toPath(String fileName) {
try {
URL fileUrl = _12_ChatWithDocumentsExamples.class.getResource(fileName);
return Paths.get(fileUrl.toURI());
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
}
} | [
"dev.langchain4j.chain.ConversationalRetrievalChain.builder",
"dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder"
] | [((1810, 2050), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1810, 2021), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1810, 1969), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1810, 1917), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2254, 2667), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2254, 2476), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2254, 2383), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder')] |
///usr/bin/env jbang "$0" "$@" ; exit $?
//DEPS dev.langchain4j:langchain4j:0.25.0
//DEPS dev.langchain4j:langchain4j-ollama:0.25.0
//DEPS org.slf4j:slf4j-jdk14:2.0.10
import static java.lang.System.out;
import java.util.concurrent.CountDownLatch;
import dev.langchain4j.model.StreamingResponseHandler;
import dev.langchain4j.model.language.StreamingLanguageModel;
import dev.langchain4j.model.ollama.OllamaStreamingLanguageModel;
import dev.langchain4j.model.output.Response;
public class LangchainOllama {
public static void main(String[] args) throws InterruptedException {
StreamingLanguageModel model = OllamaStreamingLanguageModel.builder()
.baseUrl("http://localhost:11434")
.modelName("mistral")
.temperature(0.0)
.build();
String review = "What is the captial of Germany?";
out.print("Answer: ");
CountDownLatch latch = new CountDownLatch(1);
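// Streaming generation is asynchronous: the latch keeps the main thread alive until
// the handler below signals completion (or an error).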
model.generate(prompt, new StreamingResponseHandler<String>() {
@Override
public void onNext(String token) {
System.out.print(token);
}
@Override
public void onComplete(Response<String> response) {
latch.countDown();
}
@Override
public void onError(Throwable error) {
latch.countDown();
}
});
latch.await();
System.exit(0);
}
} | [
"dev.langchain4j.model.ollama.OllamaStreamingLanguageModel.builder"
] | [((627, 797), 'dev.langchain4j.model.ollama.OllamaStreamingLanguageModel.builder'), ((627, 776), 'dev.langchain4j.model.ollama.OllamaStreamingLanguageModel.builder'), ((627, 746), 'dev.langchain4j.model.ollama.OllamaStreamingLanguageModel.builder'), ((627, 712), 'dev.langchain4j.model.ollama.OllamaStreamingLanguageModel.builder')] |
package com.mindolph.base.genai.llm;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiChatModel.OpenAiChatModelBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.InetSocketAddress;
import java.net.Proxy;
import java.time.Duration;
/**
* @author mindolph.com@gmail.com
* @since 1.7
*/
public class OpenAiProvider extends BaseLlmProvider {
private static final Logger log = LoggerFactory.getLogger(OpenAiProvider.class);
public OpenAiProvider(String apiKey, String aiModel) {
super(apiKey, aiModel);
}
@Override
protected ChatLanguageModel buildAI(float temperature) {
log.info("Build OpenAI with model %s and access %s".formatted(this.aiModel,
super.proxyEnabled ? "with %s proxy %s".formatted(Proxy.Type.valueOf(super.proxyType), this.proxyUrl) : "without proxy"));
OpenAiChatModelBuilder builder = OpenAiChatModel.builder()
.apiKey(this.apiKey)
.modelName(this.aiModel)
.maxRetries(1)
.timeout(Duration.ofSeconds(timeout))
.temperature((double) temperature);
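// maxRetries(1) limits the client to a single attempt (no automatic retries), and
// timeout bounds the whole HTTP round trip.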
if (super.proxyEnabled) {
Proxy.Type proxyType = Proxy.Type.valueOf(super.proxyType);
builder.proxy(new Proxy(proxyType, new InetSocketAddress(this.proxyHost, this.proxyPort)));
}
OpenAiChatModel model = builder.build();
return model;
}
}
| [
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((884, 919), 'java.net.Proxy.Type.valueOf'), ((998, 1237), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((998, 1186), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((998, 1132), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((998, 1101), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((998, 1060), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1308, 1343), 'java.net.Proxy.Type.valueOf')] |
package me.nzuguem.bot.configurations.llm;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.rag.content.Content;
import dev.langchain4j.rag.content.retriever.ContentRetriever;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.rag.query.Query;
import io.quarkiverse.langchain4j.pgvector.PgVectorEmbeddingStore;
import jakarta.enterprise.context.ApplicationScoped;
import me.nzuguem.bot.exceptions.NotFoundRelevantException;
import java.util.List;
@ApplicationScoped
public class GithubAppContentRetriever implements ContentRetriever {
private final EmbeddingStoreContentRetriever retriever;
public GithubAppContentRetriever(PgVectorEmbeddingStore embeddingStore, EmbeddingModel embeddingModel) {
this.retriever = EmbeddingStoreContentRetriever.builder()
.embeddingStore(embeddingStore)
.embeddingModel(embeddingModel)
.maxResults(20)
.minScore(0.9)
.build();
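// minScore(0.9) is deliberately strict: only near-exact semantic matches are
// returned, and retrieve() below turns an empty result into a domain exception.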
}
@Override
public List<Content> retrieve(Query query) {
var result = this.retriever.retrieve(query);
if (result.isEmpty()) {
throw new NotFoundRelevantException("No information relevant to the configured score was found");
}
return result;
}
}
| [
"dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder"
] | [((812, 1036), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((812, 1011), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((812, 980), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((812, 948), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((812, 900), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder')] |
/****************************************************************************************
Copyright © 2003-2012 hbasesoft Corporation. All rights reserved. Reproduction or <br>
transmission in whole or in part, in any form or by any means, electronic, mechanical <br>
or otherwise, is prohibited without the prior written consent of the copyright owner. <br>
****************************************************************************************/
package com.hbasesoft.framework.langchain4j.demo;
import static dev.langchain4j.data.document.FileSystemDocumentLoader.loadDocument;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Path;
import java.nio.file.Paths;
import com.hbasesoft.framework.common.utils.PropertyHolder;
import com.hbasesoft.framework.langchain4j.dashscope.QwenChatModel;
import com.hbasesoft.framework.langchain4j.dashscope.QwenEmbeddingModel;
import dev.langchain4j.chain.ConversationalRetrievalChain;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.retriever.EmbeddingStoreRetriever;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
/**
* <Description> <br>
*
* @author 王伟<br>
* @version 1.0<br>
* @taskId <br>
* @CreateDate 2023-10-26 <br>
* @since V1.0<br>
* @see com.hbasesoft.framework.langchain4j.demo <br>
*/
public class ChatWithDocumentsExamples {
/**
* <Description> <br>
*
* @author 王伟<br>
* @version 1.0<br>
* @taskId <br>
* @CreateDate 2023-10-26 <br>
* @since V1.0<br>
* @see com.hbasesoft.framework.langchain4j.demo <br>
*/
public static class IfYouNeedSimplicity {
/** NUM_500 */
private static final int NUM_500 = 500;
/**
* Description: <br>
*
* @author 王伟<br>
* @taskId <br>
* @param args
* @throws Exception <br>
*/
public static void main(final String[] args) throws Exception {
EmbeddingModel embeddingModel = QwenEmbeddingModel.builder()
.apiKey(PropertyHolder.getProperty("qwen.apikey"))
.build();
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
.documentSplitter(DocumentSplitters.recursive(NUM_500, 0))
.embeddingModel(embeddingModel)
.embeddingStore(embeddingStore)
.build();
Document document = loadDocument(toPath("example-files/前端项目配置启动.docx"));
ingestor.ingest(document);
ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder()
.chatLanguageModel(QwenChatModel.builder().apiKey(PropertyHolder.getProperty("qwen.apikey")).build())
.retriever(EmbeddingStoreRetriever.from(embeddingStore, embeddingModel))
// .chatMemory() // you can override default chat memory
// .promptTemplate() // you can override default prompt template
.build();
String answer = chain.execute("npm 私服的地址是什么?");
System.out.println(answer); // Charlie is a cheerful carrot living in VeggieVille...
}
}
private static Path toPath(final String fileName) {
try {
URL fileUrl = Thread.currentThread().getContextClassLoader().getResource(fileName);
return Paths.get(fileUrl.toURI());
}
catch (URISyntaxException e) {
throw new RuntimeException(e);
}
}
}
| [
"dev.langchain4j.chain.ConversationalRetrievalChain.builder",
"dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder"
] | [((2335, 2438), 'com.hbasesoft.framework.langchain4j.dashscope.QwenEmbeddingModel.builder'), ((2335, 2430), 'com.hbasesoft.framework.langchain4j.dashscope.QwenEmbeddingModel.builder'), ((2577, 2771), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2577, 2763), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2577, 2715), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2577, 2684), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2964, 3388), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2964, 3209), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2964, 3120), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((3038, 3119), 'com.hbasesoft.framework.langchain4j.dashscope.QwenChatModel.builder'), ((3038, 3111), 'com.hbasesoft.framework.langchain4j.dashscope.QwenChatModel.builder')] |
package my.samples;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.loader.FileSystemDocumentLoader;
import dev.langchain4j.data.document.parser.apache.pdfbox.ApachePdfBoxDocumentParser;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Path;
import java.nio.file.Paths;
public class LoadFord150ManualToES {
public static final String ANSI_GREEN = "\u001B[32m";
public static final String ANSI_RESET = "\u001B[0m";
public static final String ANSI_YELLOW = "\u001B[33m";
public static void main(String[] args) {
EmbeddingStore<TextSegment> embeddingStore = ElasticsearchEmbeddingStore.builder()
.serverUrl("http://localhost:9200")
.indexName("car-warranty-guide-embeddings")
.dimension(384)
.build();
EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel();
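// AllMiniLmL6V2 produces 384-dimensional vectors, which is why the Elasticsearch
// index above is created with dimension(384).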
EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
.documentSplitter(DocumentSplitters.recursive(300, 0))
.embeddingModel(embeddingModel)
.embeddingStore(embeddingStore)
.build();
Path filePath = toPath("example-files/2025_US_F150_Warranty_Guide_ENG_V1.pdf");
Document document = FileSystemDocumentLoader.loadDocument(filePath, new ApachePdfBoxDocumentParser());
document.metadata().add("fileName", filePath.getFileName().toString());
document.metadata().add("filePath", filePath.toString());
document.metadata().add("company", "FORD");
document.metadata().add("product", "F150");
document.metadata().add("language", "ENG");
document.metadata().add("version", "V1");
document.metadata().add("year", "2025");
document.metadata().add("type", "Warranty Guide");
document.metadata().add("country", "US");
document.metadata().add("category", "Automotive");
ingestor.ingest(document);
System.out.println(ANSI_GREEN + "Document ingested successfully" + ANSI_RESET);
}
private static Path toPath(String fileName) {
try {
// Corrected path assuming files are in src/main/resources/example-files
URL fileUrl = LoadFord150ManualToES.class.getClassLoader().getResource(fileName);
if (fileUrl == null) {
throw new RuntimeException("Resource not found: " + fileName);
}
return Paths.get(fileUrl.toURI());
} catch (URISyntaxException e) {
throw new RuntimeException("Failed to resolve URI for: " + fileName, e);
}
}
}
| [
"dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder",
"dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder"
] | [((1086, 1292), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder'), ((1086, 1267), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder'), ((1086, 1235), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder'), ((1086, 1175), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder'), ((1413, 1637), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1413, 1612), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1413, 1564), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1413, 1516), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder')] |
package ai.equity.salt.openai.model;
import ai.equity.salt.openai.OpenAiProperties;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiLanguageModel;
import lombok.RequiredArgsConstructor;
import org.springframework.stereotype.Component;
import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO_1106;
@Component
@RequiredArgsConstructor
public class OpenAiModelFactory {
private final OpenAiProperties properties;
public OpenAiLanguageModel create() {
return OpenAiLanguageModel.builder()
.apiKey(properties.key())
.logRequests(true)
.logResponses(true)
.build();
}
public OpenAiChatModel createDefaultChatModel() {
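// temperature(0.0) together with topP(0.0) pins sampling to the most likely tokens,
// making the output as deterministic as the API allows.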
return OpenAiChatModel.builder()
.modelName(GPT_3_5_TURBO_1106)
.apiKey(properties.key())
.maxTokens(1024)
.temperature(0.0)
.topP(0.0)
.logRequests(true)
.logResponses(true)
.build();
}
}
| [
"dev.langchain4j.model.openai.OpenAiLanguageModel.builder",
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((540, 707), 'dev.langchain4j.model.openai.OpenAiLanguageModel.builder'), ((540, 682), 'dev.langchain4j.model.openai.OpenAiLanguageModel.builder'), ((540, 646), 'dev.langchain4j.model.openai.OpenAiLanguageModel.builder'), ((540, 611), 'dev.langchain4j.model.openai.OpenAiLanguageModel.builder'), ((784, 1088), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((784, 1063), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((784, 1027), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((784, 992), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((784, 965), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((784, 931), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((784, 898), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((784, 856), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')] |
package edu.whut.cs.esd.llm.smarthome;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.parser.TextDocumentParser;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.openai.OpenAiTokenizer;
import dev.langchain4j.retriever.EmbeddingStoreRetriever;
import dev.langchain4j.retriever.Retriever;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import org.springframework.boot.ApplicationRunner;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.Bean;
import org.springframework.core.io.Resource;
import org.springframework.core.io.ResourceLoader;
import java.io.IOException;
import java.util.Scanner;
import static dev.langchain4j.data.document.loader.FileSystemDocumentLoader.loadDocument;
import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO;
@SpringBootApplication
public class SmartHomeApplication {
/**
* Run SmartHomeApplicationTest to see simulated conversation with customer support agent
*/
@Bean
ApplicationRunner interactiveChatRunner(SmartHomeAgent agent) {
return args -> {
Scanner scanner = new Scanner(System.in);
while (true) {
System.out.print("User: ");
String userMessage = scanner.nextLine();
if ("exit".equalsIgnoreCase(userMessage)) {
break;
}
String agentMessage = agent.chat(userMessage);
System.out.println("Agent: " + agentMessage);
}
scanner.close();
};
}
@Bean
SmartHomeAgent customerSupportAgent(ChatLanguageModel chatLanguageModel,
EnvironmentalTools environmentalTools,
Retriever<TextSegment> retriever) {
return AiServices.builder(SmartHomeAgent.class)
.chatLanguageModel(chatLanguageModel)
.chatMemory(MessageWindowChatMemory.withMaxMessages(20))
.tools(environmentalTools)
.retriever(retriever)
.build();
}
@Bean
Retriever<TextSegment> retriever(EmbeddingStore<TextSegment> embeddingStore, EmbeddingModel embeddingModel) {
// You will need to adjust these parameters to find the optimal setting, which will depend on two main factors:
// - The nature of your data
// - The embedding model you are using
// int maxResultsRetrieved = 1;
int maxResultsRetrieved = 3;
double minScore = 0.6;
return EmbeddingStoreRetriever.from(embeddingStore, embeddingModel, maxResultsRetrieved, minScore);
}
@Bean
EmbeddingModel embeddingModel() {
return new AllMiniLmL6V2EmbeddingModel();
}
@Bean
EmbeddingStore<TextSegment> embeddingStore(EmbeddingModel embeddingModel, ResourceLoader resourceLoader) throws IOException {
// Normally, you would already have your embedding store filled with your data.
// However, for the purpose of this demonstration, we will:
// 1. Create an in-memory embedding store
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
// 2. Load an example document ("Miles of Smiles" terms of use)
Resource resource = resourceLoader.getResource("classpath:smart-home-product-manual.txt");
Document document = loadDocument(resource.getFile().toPath(), new TextDocumentParser());
// 3. Split the document into segments 100 tokens each
// 4. Convert segments into embeddings
// 5. Store embeddings into embedding store
// All this can be done manually, but we will use EmbeddingStoreIngestor to automate this:
// DocumentSplitter documentSplitter = DocumentSplitters.recursive(100, 0, new OpenAiTokenizer(GPT_3_5_TURBO));
DocumentSplitter documentSplitter = DocumentSplitters.recursive(300, 0, new OpenAiTokenizer(GPT_3_5_TURBO));
EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
.documentSplitter(documentSplitter)
.embeddingModel(embeddingModel)
.embeddingStore(embeddingStore)
.build();
ingestor.ingest(document);
return embeddingStore;
}
public static void main(String[] args) {
SpringApplication.run(SmartHomeApplication.class, args);
}
}
| [
"dev.langchain4j.service.AiServices.builder",
"dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder"
] | [((2504, 2777), 'dev.langchain4j.service.AiServices.builder'), ((2504, 2752), 'dev.langchain4j.service.AiServices.builder'), ((2504, 2714), 'dev.langchain4j.service.AiServices.builder'), ((2504, 2671), 'dev.langchain4j.service.AiServices.builder'), ((2504, 2598), 'dev.langchain4j.service.AiServices.builder'), ((4685, 4890), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((4685, 4865), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((4685, 4817), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((4685, 4769), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder')] |
package com.example.demo;
import java.time.Duration;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
public class _01_ModelParameters {
public static void main(String [] args) {
Duration duration = Duration.ofSeconds(60);
ChatLanguageModel model = OpenAiChatModel.builder()
.apiKey(ApiKeys.OPENAI_API_KEY)
.temperature(0.3)
.timeout(duration)
.logRequests(true)
.logResponses(true)
.build();
String prompt = "Explain in three lines how to make a code cleaner";
String response = model.generate(prompt);
System.out.println(response);
}
}
| [
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((320, 486), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((320, 473), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((320, 449), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((320, 426), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((320, 403), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((320, 381), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')] |
package com.sam.apihelpfulprofessor.service.Langchain;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.sam.apihelpfulprofessor.mapper.TopicMapper;
import com.sam.apihelpfulprofessor.model.Topic;
import com.sam.apihelpfulprofessor.repository.TopicRepository;
import dev.langchain4j.chain.ConversationalRetrievalChain;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.internal.Json;
import dev.langchain4j.model.inprocess.InProcessEmbeddingModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.retriever.EmbeddingStoreRetriever;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import jakarta.annotation.PostConstruct;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.io.ClassPathResource;
import org.springframework.stereotype.Service;
import static dev.langchain4j.model.inprocess.InProcessEmbeddingModelType.ALL_MINILM_L6_V2;
import java.io.File;
import java.io.IOException;
import java.util.List;
@Service
public class LangChainService {
private String OPENAI_TOKEN;
public static OpenAiChatModel CHAT_MODEL;
public static InProcessEmbeddingModel EMBEDDING_MODEL = new InProcessEmbeddingModel(ALL_MINILM_L6_V2);
public static ConversationalRetrievalChain CHAIN;
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
EmbeddingStoreIngestor ingestor;
TopicMapper topicMapper = TopicMapper.INSTANCE;
private final TopicRepository topicRepository;
@Autowired
public LangChainService(@Value("${openaitoken}") String token,
TopicRepository topicRepository) {
this.OPENAI_TOKEN = token;
CHAT_MODEL = OpenAiChatModel.withApiKey(OPENAI_TOKEN);
this.topicRepository = topicRepository;
}
// @PostConstruct
public void buildEmbeddingStoreFromFile() throws IOException {
List<Topic> topics = loadTopicsFromFile("static/socio-data.json");
buildEmbeddingStore(topics);
}
@PostConstruct
public void buildEmbeddingStoreFromDb() throws IOException {
List<Topic> topics = topicRepository.findAll();
buildEmbeddingStore(topics);
}
public void buildEmbeddingStore(List<Topic> topics) {
ingestor = EmbeddingStoreIngestor.builder()
.splitter(new CustomSentenceSplitter())
.embeddingModel(EMBEDDING_MODEL)
.embeddingStore(embeddingStore)
.build();
topics.forEach(topic -> {
ingestor.ingest(Document.from(Json.toJson(topicMapper.toDto(topic))));
});
// Build LLM chain with a knowledge base of all the data from the file
CHAIN = ConversationalRetrievalChain.builder()
.chatLanguageModel(CHAT_MODEL)
.retriever(EmbeddingStoreRetriever.from(embeddingStore, EMBEDDING_MODEL))
.build();
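// CHAIN.execute("...") will now embed the question, retrieve the best-matching topic
// JSON from the store, and pass it to the chat model as context.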
}
private List<Topic> loadTopicsFromFile(String filename) throws IOException {
// Load file and embed each object
File file = new ClassPathResource(filename).getFile();
ObjectMapper mapper = new ObjectMapper();
return mapper.readValue(file, new TypeReference<List<Topic>>() {});
}
}
| [
"dev.langchain4j.chain.ConversationalRetrievalChain.builder",
"dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder"
] | [((2595, 2805), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2595, 2780), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2595, 2732), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2595, 2683), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((3032, 3232), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((3032, 3207), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((3032, 3117), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder')] |
package com.pengyu.magnet.langchain4j;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.service.AiServices;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
@Configuration
public class Langchain4JConfig {
@Bean
AssessmentAgent assessmentAgent(ChatLanguageModel chatLanguageModel) {
return AiServices.builder(AssessmentAgent.class)
.chatLanguageModel(chatLanguageModel)
// .chatMemory(MessageWindowChatMemory.withMaxMessages(20))
.build();
}
@Bean
MatchAgent matchAgent(ChatLanguageModel chatLanguageModel) {
return AiServices.builder(MatchAgent.class)
.chatLanguageModel(chatLanguageModel)
// .chatMemory(MessageWindowChatMemory.withMaxMessages(20))
.build();
}
}
| [
"dev.langchain4j.service.AiServices.builder"
] | [((458, 653), 'dev.langchain4j.service.AiServices.builder'), ((458, 553), 'dev.langchain4j.service.AiServices.builder'), ((752, 942), 'dev.langchain4j.service.AiServices.builder'), ((752, 842), 'dev.langchain4j.service.AiServices.builder')] |
package it.croway;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.parser.TextDocumentParser;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.openai.OpenAiTokenizer;
import dev.langchain4j.rag.content.retriever.ContentRetriever;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.io.Resource;
import org.springframework.core.io.ResourceLoader;
import java.io.IOException;
import static dev.langchain4j.data.document.loader.FileSystemDocumentLoader.loadDocument;
import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO;
@Configuration
public class ConversationalAIConfiguration {
@Bean
ConversationalAIAgent customerSupportAgent(ChatLanguageModel chatLanguageModel,
ContentRetriever contentRetriever) {
return AiServices.builder(ConversationalAIAgent.class)
.chatLanguageModel(chatLanguageModel)
.chatMemory(MessageWindowChatMemory.withMaxMessages(20))
.contentRetriever(contentRetriever)
.build();
}
@Bean
ContentRetriever contentRetriever(EmbeddingStore<TextSegment> embeddingStore, EmbeddingModel embeddingModel) {
// You will need to adjust these parameters to find the optimal setting, which will depend on two main factors:
// - The nature of your data
// - The embedding model you are using
int maxResults = 1;
double minScore = 0.6;
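// With maxResults = 1 the agent sees only the single best-matching segment per query;
// raise it if answers need broader context from the contributing guide.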
return EmbeddingStoreContentRetriever.builder()
.embeddingStore(embeddingStore)
.embeddingModel(embeddingModel)
.maxResults(maxResults)
.minScore(minScore)
.build();
}
@Bean
EmbeddingModel embeddingModel() {
return new AllMiniLmL6V2EmbeddingModel();
}
@Bean
EmbeddingStore<TextSegment> embeddingStore(EmbeddingModel embeddingModel, ResourceLoader resourceLoader) throws IOException {
// Normally, you would already have your embedding store filled with your data.
// However, for the purpose of this demonstration, we will:
// 1. Create an in-memory embedding store
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
// camel-contributing is copied and pasted from https://camel.apache.org/camel-core/contributing/
Resource resource = resourceLoader.getResource("classpath:camel-contributing.txt");
Document document = loadDocument(resource.getFile().toPath(), new TextDocumentParser());
// 3. Split the document into segments 100 tokens each
// 4. Convert segments into embeddings
// 5. Store embeddings into embedding store
// All this can be done manually, but we will use EmbeddingStoreIngestor to automate this:
DocumentSplitter documentSplitter = DocumentSplitters.recursive(100, 0, new OpenAiTokenizer(GPT_3_5_TURBO));
EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
.documentSplitter(documentSplitter)
.embeddingModel(embeddingModel)
.embeddingStore(embeddingStore)
.build();
ingestor.ingest(document);
return embeddingStore;
}
}
| [
"dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder",
"dev.langchain4j.service.AiServices.builder",
"dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder"
] | [((1575, 1778), 'dev.langchain4j.service.AiServices.builder'), ((1575, 1765), 'dev.langchain4j.service.AiServices.builder'), ((1575, 1725), 'dev.langchain4j.service.AiServices.builder'), ((1575, 1664), 'dev.langchain4j.service.AiServices.builder'), ((2147, 2324), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2147, 2311), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2147, 2287), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2147, 2259), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2147, 2223), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3486, 3643), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((3486, 3630), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((3486, 3594), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((3486, 3558), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder')] |
import dev.langchain4j.agent.tool.Tool;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.service.AiServices;
public class _04_Agents {
static class MyAgent {
@Tool("...")
void printAgent() {
System.out.println("Personal secret agent activated!");
}
}
interface Assistant {
Response<AiMessage> chat(String userMessage);
}
public static void main(String[] args) {
String openAiKey = System.getenv("OPENAI_API_KEY");
var assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(OpenAiChatModel.withApiKey(openAiKey))
.chatMemory(MessageWindowChatMemory.withMaxMessages(10))
.tools(new MyAgent())
.build();
var question = "What is the sum of the numbers of letters in the words 'language' and 'model'";
var response = assistant.chat(question);
System.out.println(response.content().text());
System.out.println("\n\n########### TOKEN USAGE ############\n");
System.out.println(response.tokenUsage());
}
}
| [
"dev.langchain4j.service.AiServices.builder"
] | [((696, 941), 'dev.langchain4j.service.AiServices.builder'), ((696, 916), 'dev.langchain4j.service.AiServices.builder'), ((696, 878), 'dev.langchain4j.service.AiServices.builder'), ((696, 805), 'dev.langchain4j.service.AiServices.builder')] |
package com.jibanez.clgeneratoraiservice;
import com.jibanez.clgeneratoraiservice.service.CoverLetterAiService;
import com.jibanez.clgeneratoraiservice.service.JobDetailsExtractorAiService;
import com.jibanez.clgeneratoraiservice.util.AiDemoService;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.parser.TextDocumentParser;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.rag.content.retriever.ContentRetriever;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.Bean;
import org.springframework.core.io.Resource;
import org.springframework.core.io.ResourceLoader;
import java.io.IOException;
import static dev.langchain4j.data.document.loader.FileSystemDocumentLoader.loadDocument;
@SpringBootApplication
public class ClGeneratorAiServiceApplication {
public static void main(String[] args) {
SpringApplication.run(ClGeneratorAiServiceApplication.class, args);
}
@Bean
AiDemoService aiDemoService(ChatLanguageModel chatLanguageModel,
// BookingTools bookingTools,
ContentRetriever contentRetriever) {
return AiServices.builder(AiDemoService.class)
.chatLanguageModel(chatLanguageModel)
.chatMemory(MessageWindowChatMemory.withMaxMessages(20))
// .tools(bookingTools)
.contentRetriever(contentRetriever)
.build();
}
@Bean
CoverLetterAiService coverLetterAiService(ChatLanguageModel chatLanguageModel,
// BookingTools bookingTools,
ContentRetriever contentRetriever) {
return AiServices.builder(CoverLetterAiService.class)
.chatLanguageModel(chatLanguageModel)
.chatMemory(MessageWindowChatMemory.withMaxMessages(20))
// .tools(bookingTools)
.contentRetriever(contentRetriever)
.build();
}
@Bean
JobDetailsExtractorAiService jobDetailsExtractorAiService(ChatLanguageModel chatLanguageModel) {
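// No chat memory or retriever here: extracting job details is a stateless,
// single-shot call to the model.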
return AiServices.builder(JobDetailsExtractorAiService.class)
.chatLanguageModel(chatLanguageModel)
.build();
}
@Bean
ContentRetriever contentRetriever(EmbeddingStore<TextSegment> embeddingStore, EmbeddingModel embeddingModel) {
// You will need to adjust these parameters to find the optimal setting, which will depend on two main factors:
// - The nature of your data
// - The embedding model you are using
int maxResults = 10;
double minScore = 0.6;
return EmbeddingStoreContentRetriever.builder()
.embeddingStore(embeddingStore)
.embeddingModel(embeddingModel)
.maxResults(maxResults)
.minScore(minScore)
.build();
}
@Bean
EmbeddingModel embeddingModel() {
return new AllMiniLmL6V2EmbeddingModel();
}
@Bean
EmbeddingStore<TextSegment> embeddingStore(EmbeddingModel embeddingModel, ResourceLoader resourceLoader) throws IOException {
// Normally, you would already have your embedding store filled with your data.
// However, for the purpose of this demonstration, we will:
// 1. Create an in-memory embedding store
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
// 2. Load a document with user data
Resource resource = resourceLoader.getResource("classpath:user-data.txt");
Document document = loadDocument(resource.getFile().toPath(), new TextDocumentParser());
// 3. Split the document into segments 300 tokens each
// 4. Convert segments into embeddings
// 5. Store embeddings into embedding store
// All this can be done manually, but we will use EmbeddingStoreIngestor to automate this:
DocumentSplitter documentSplitter = DocumentSplitters.recursive(300, 0);
EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
.documentSplitter(documentSplitter)
.embeddingModel(embeddingModel)
.embeddingStore(embeddingStore)
.build();
ingestor.ingest(document);
return embeddingStore;
}
}
| [
"dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder",
"dev.langchain4j.service.AiServices.builder",
"dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder"
] | [((1966, 2248), 'dev.langchain4j.service.AiServices.builder'), ((1966, 2223), 'dev.langchain4j.service.AiServices.builder'), ((1966, 2132), 'dev.langchain4j.service.AiServices.builder'), ((1966, 2059), 'dev.langchain4j.service.AiServices.builder'), ((2523, 2812), 'dev.langchain4j.service.AiServices.builder'), ((2523, 2787), 'dev.langchain4j.service.AiServices.builder'), ((2523, 2696), 'dev.langchain4j.service.AiServices.builder'), ((2523, 2623), 'dev.langchain4j.service.AiServices.builder'), ((2947, 3080), 'dev.langchain4j.service.AiServices.builder'), ((2947, 3055), 'dev.langchain4j.service.AiServices.builder'), ((3495, 3732), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3495, 3707), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3495, 3671), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3495, 3631), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3495, 3583), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((4891, 5096), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((4891, 5071), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((4891, 5023), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((4891, 4975), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder')] |
/*
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package services.ai;
import com.google.cloud.vertexai.VertexAI;
import com.google.cloud.vertexai.api.Content;
import com.google.cloud.vertexai.generativeai.GenerativeModel;
import com.google.cloud.vertexai.api.GenerateContentResponse;
import com.google.cloud.vertexai.api.GenerationConfig;
import com.google.cloud.vertexai.api.SafetySetting;
import com.google.cloud.vertexai.api.HarmCategory;
import com.google.cloud.vertexai.api.Part;
import com.google.cloud.vertexai.api.Blob;
import com.google.protobuf.ByteString;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.model.vertexai.VertexAiChatModel;
import dev.langchain4j.model.vertexai.VertexAiLanguageModel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;
import services.config.CloudConfig;
import services.utility.CloudUtility;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
@Service
public class VertexAIClient {
private static final Logger logger = LoggerFactory.getLogger(VertexAIClient.class);
public GenerateContentResponse promptOnImage(byte[] image) throws IOException {
return promptOnImage(image, "");
}
public GenerateContentResponse promptOnImage(byte[] image, String prompt) throws IOException {
GenerateContentResponse response = null;
        if (prompt == null || prompt.isBlank())
            prompt = "Extract the book name, labels, main color, and author strictly in JSON format. The JSON output must have exactly the property names bookName, mainColor, author, and labels.";
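        // Illustrative (hypothetical) shape of the model's JSON reply to the default prompt:
        // {"bookName": "...", "mainColor": "...", "author": "...", "labels": ["..."]}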
String location = CloudUtility.extractRegion(CloudConfig.zone);
try (VertexAI vertexAI = new VertexAI(CloudConfig.projectID, location)) {
GenerationConfig generationConfig =
GenerationConfig.newBuilder()
.setMaxOutputTokens(2048)
.setTemperature(0.4F)
.setTopK(32)
.setTopP(1F)
.build();
GenerativeModel model = new GenerativeModel(VertexModels.GEMINI_PRO_VISION_VERSION, generationConfig, vertexAI);
List<SafetySetting> safetySettings = Arrays.asList(
SafetySetting.newBuilder()
.setCategory(HarmCategory.HARM_CATEGORY_HATE_SPEECH)
.setThreshold(SafetySetting.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE)
.build(),
SafetySetting.newBuilder()
.setCategory(HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT)
.setThreshold(SafetySetting.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE)
.build(),
SafetySetting.newBuilder()
.setCategory(HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT)
.setThreshold(SafetySetting.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE)
.build(),
SafetySetting.newBuilder()
.setCategory(HarmCategory.HARM_CATEGORY_HARASSMENT)
.setThreshold(SafetySetting.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE)
.build()
);
List<Content> contents = new ArrayList<>();
        contents.add(Content.newBuilder()
                .setRole("user")
                .addParts(Part.newBuilder()
                        .setInlineData(Blob.newBuilder()
                                .setMimeType("image/png")
                                .setData(ByteString.copyFrom(image))))
                .addParts(Part.newBuilder().setText(prompt))
                .build());
// ResponseStream<GenerateContentResponse> responseStream = model.generateContentStream(contents, safetySettings);
response = model.generateContent(contents, safetySettings);
logger.info(response.toString());
}
return response;
}
public String promptModel(String prompt, String modelName) {
        String output = null;
        logger.info("The prompt & model name are: " + prompt.substring(0, Math.min(prompt.length(), 100)) + " | " + modelName);
if (modelName.contains("chat")) {
VertexAiChatModel vertexAiChatModel = VertexAiChatModel.builder()
.endpoint("us-central1-aiplatform.googleapis.com:443")
.project(CloudConfig.projectID)
.location(CloudConfig.zone)
.publisher("google")
.modelName(modelName)
.temperature(0.1)
.maxOutputTokens(1000)
.topK(0)
.topP(0.0)
.maxRetries(3)
.build();
Response<AiMessage> modelResponse = vertexAiChatModel.generate(UserMessage.from(prompt));
output = modelResponse.content().text();
} else {
VertexAiLanguageModel vertexAiTextModel = VertexAiLanguageModel.builder()
.endpoint("us-central1-aiplatform.googleapis.com:443")
.project(CloudConfig.projectID)
.location(CloudConfig.zone)
.publisher("google")
.modelName(modelName)
.temperature(0.1)
.maxOutputTokens(1000)
.topK(0)
.topP(0.0)
.maxRetries(3)
.build();
Response<String> textResponse = vertexAiTextModel.generate(prompt);
output = textResponse.content();
}
logger.info(output);
return output;
}
}
| [
"dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder",
"dev.langchain4j.model.vertexai.VertexAiChatModel.builder"
] | [((2519, 2771), 'com.google.cloud.vertexai.api.GenerationConfig.newBuilder'), ((2519, 2734), 'com.google.cloud.vertexai.api.GenerationConfig.newBuilder'), ((2519, 2693), 'com.google.cloud.vertexai.api.GenerationConfig.newBuilder'), ((2519, 2652), 'com.google.cloud.vertexai.api.GenerationConfig.newBuilder'), ((2519, 2602), 'com.google.cloud.vertexai.api.GenerationConfig.newBuilder'), ((2982, 3225), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((2982, 3188), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((2982, 3089), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((3247, 3496), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((3247, 3459), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((3247, 3360), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((3518, 3767), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((3518, 3730), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((3518, 3631), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((3789, 4031), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((3789, 3994), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((3789, 3895), 'com.google.cloud.vertexai.api.SafetySetting.newBuilder'), ((4128, 4409), 'com.google.cloud.vertexai.api.Content.newBuilder'), ((4128, 4380), 'com.google.cloud.vertexai.api.Content.newBuilder'), ((4128, 4315), 'com.google.cloud.vertexai.api.Content.newBuilder'), ((4128, 4164), 'com.google.cloud.vertexai.api.Content.newBuilder'), ((4174, 4314), 'com.google.cloud.vertexai.api.Part.newBuilder'), ((4206, 4313), 'com.google.cloud.vertexai.api.Blob.newBuilder'), ((4206, 4248), 'com.google.cloud.vertexai.api.Blob.newBuilder'), ((4346, 4379), 'com.google.cloud.vertexai.api.Part.newBuilder'), ((4981, 5471), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5442), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5407), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5376), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5347), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5304), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5266), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5224), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5183), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5135), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((4981, 5083), 'dev.langchain4j.model.vertexai.VertexAiChatModel.builder'), ((5699, 6193), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 6164), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 6129), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 6098), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 6069), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 6026), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 5988), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 5946), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 5905), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 5857), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder'), ((5699, 5805), 'dev.langchain4j.model.vertexai.VertexAiLanguageModel.builder')] |
package com.kchandrakant;
import dev.langchain4j.agent.tool.Tool;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.service.AiServices;
public class ServiceWithTools {
static class Calculator {
@Tool("Calculates the length of a string")
int stringLength(String s) {
return s.length();
}
@Tool("Calculates the sum of two numbers")
int add(int a, int b) {
return a + b;
}
@Tool("Calculates the square root of a number")
double sqrt(int x) {
return Math.sqrt(x);
}
}
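    // Tool-calling flow (sketch): AiServices passes the @Tool descriptions to the LLM;
    // when the model requests a tool call (e.g. stringLength("language")), the matching
    // method is executed and its result is fed back until a final answer is produced.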
interface Assistant {
String chat(String userMessage);
}
public static void main(String[] args) {
Assistant assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY))
.tools(new Calculator())
.chatMemory(MessageWindowChatMemory.withMaxMessages(10))
.build();
//String question = "What is the square root of the sum of the numbers of letters in the words \"hello\" and \"world\"?";
String question = "What is the sum of the numbers of letters in the words \"language\" and \"model\"?";
String answer = assistant.chat(question);
System.out.println(answer);
        // The sum of the numbers of letters in "language" and "model" is 13 (8 + 5).
}
}
| [
"dev.langchain4j.service.AiServices.builder"
] | [((824, 1085), 'dev.langchain4j.service.AiServices.builder'), ((824, 1060), 'dev.langchain4j.service.AiServices.builder'), ((824, 987), 'dev.langchain4j.service.AiServices.builder'), ((824, 946), 'dev.langchain4j.service.AiServices.builder')] |
package bot;
import dev.langchain4j.chain.ConversationalRetrievalChain;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.huggingface.HuggingFaceChatModel;
import dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.retriever.EmbeddingStoreRetriever;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Path;
import java.nio.file.Paths;
import static dev.langchain4j.data.document.FileSystemDocumentLoader.loadDocument;
import static java.time.Duration.ofSeconds;
import java.io.File;
public class Bot {
//HuggingFace API
public static final String HF_API_KEY = "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";
public static String perguntar(String arquivo, String pergunta) throws Exception {
Document document = loadDocument(toPath(arquivo));
//choosing an embedding model
EmbeddingModel embeddingModel = HuggingFaceEmbeddingModel.builder()
.accessToken(HF_API_KEY)
.modelId("sentence-transformers/all-MiniLM-L6-v2")
.waitForModel(true)
.timeout(ofSeconds(90))
.build();
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
//embedding doc
EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
.documentSplitter(DocumentSplitters.recursive(200))
.embeddingModel(embeddingModel)
.embeddingStore(embeddingStore)
.build();
ingestor.ingest(document);
//choosing a model to predict
        ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder()
                .chatLanguageModel(HuggingFaceChatModel.withAccessToken(HF_API_KEY))
                // .chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY)) // alternative; setting the model twice would override the line above
                .retriever(EmbeddingStoreRetriever.from(embeddingStore, embeddingModel))
                // .promptTemplate() // you can override default prompt template
                // .chatMemory() // you can override default chat memory
                .build();
//predict
String answer = chain.execute(pergunta);
return answer;
}
private static Path toPath(String fileName) throws MalformedURLException {
try {
File file = new File(fileName);
URL fileUrl = file.toURI().toURL();
return Paths.get(fileUrl.toURI());
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
}
} | [
"dev.langchain4j.chain.ConversationalRetrievalChain.builder",
"dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel.builder",
"dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder"
] | [((1383, 1627), 'dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel.builder'), ((1383, 1602), 'dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel.builder'), ((1383, 1562), 'dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel.builder'), ((1383, 1526), 'dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel.builder'), ((1383, 1459), 'dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel.builder'), ((1790, 2005), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1790, 1980), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1790, 1932), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1790, 1884), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2155, 2617), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2155, 2535), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2155, 2367), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2155, 2278), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder')] |
package dev.morvan.interceptor;
import static dev.langchain4j.data.document.splitter.DocumentSplitters.recursive;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.loader.FileSystemDocumentLoader;
import dev.langchain4j.data.document.parser.TextDocumentParser;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import io.quarkiverse.langchain4j.redis.RedisEmbeddingStore;
import io.quarkus.runtime.StartupEvent;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import java.io.File;
import java.util.Collections;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.eclipse.microprofile.config.inject.ConfigProperty;
@Slf4j
@ApplicationScoped
public class Ingestor {
@Inject
RedisEmbeddingStore store;
@Inject
EmbeddingModel embeddingModel;
@ConfigProperty(name = "prompts.init.filepath")
String originalDirPath;
public void ingest(@Observes StartupEvent event) {
List<Document> documents = FileSystemDocumentLoader.loadDocuments(
new File(originalDirPath).toPath(),
new TextDocumentParser());
ingest(documents);
}
public void ingest(String filePath) {
Document document = FileSystemDocumentLoader.loadDocument(
new File(filePath).toPath(),
new TextDocumentParser());
ingest(Collections.singletonList(document));
log.info("Ingested document: {}", filePath);
}
private void ingest(List<Document> documents) {
log.info("Ingesting documents...");
var ingestor = EmbeddingStoreIngestor.builder()
.embeddingStore(store)
.embeddingModel(embeddingModel)
.documentSplitter(recursive(500, 0))
.build();
ingestor.ingest(documents);
log.info("Ingested {} documents", documents.size());
}
}
| [
"dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder"
] | [((1724, 1921), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1724, 1896), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1724, 1843), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1724, 1795), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder')] |
package com.egineering.ai.llmjavademo.agents;
import com.egineering.ai.llmjavademo.configurations.LlmConfiguration;
import com.egineering.ai.llmjavademo.dtos.LlmResponse;
import com.egineering.ai.llmjavademo.dtos.MessageForm;
import com.egineering.ai.llmjavademo.dtos.StreamingLlmResponse;
import com.egineering.ai.llmjavademo.models.chromadbapi.Collection;
import com.egineering.ai.llmjavademo.models.chromadbapi.QueryRequest;
import com.egineering.ai.llmjavademo.models.chromadbapi.QueryResponse;
import com.egineering.ai.llmjavademo.services.ChromaClient;
import dev.langchain4j.data.document.Metadata;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.SystemMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.StreamingResponseHandler;
import dev.langchain4j.model.chat.StreamingChatLanguageModel;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.store.embedding.EmbeddingMatch;
import lombok.SneakyThrows;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.io.Resource;
import org.springframework.messaging.simp.SimpMessagingTemplate;
import org.springframework.stereotype.Service;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
@Service
public class DocumentStreamingAgent {
@Value("classpath:prompts/documentsSystemMessage.st")
private Resource documentsSystemMessage;
private final SimpMessagingTemplate messagingTemplate;
private final EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel();
private final ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(20);
private final StreamingChatLanguageModel streamingChatModel;
private final ChromaClient chromaClient;
private final String collectionId;
public DocumentStreamingAgent(LlmConfiguration configuration, StreamingChatLanguageModel streamingChatModel, SimpMessagingTemplate messagingTemplate) {
this.messagingTemplate = messagingTemplate;
this.streamingChatModel = streamingChatModel;
this.chromaClient = new ChromaClient("http://localhost:8000", Duration.of(10, ChronoUnit.SECONDS));
Collection collection = chromaClient.collection("documents");
this.collectionId = collection.getId();
}
@SneakyThrows
public StreamingLlmResponse generate(MessageForm form) {
String prompt;
try {
prompt = documentsSystemMessage.getContentAsString(StandardCharsets.UTF_8);
} catch (IOException ioe) {
prompt = form.message();
}
Embedding questionEmbedding = embeddingModel.embed(form.message()).content();
QueryRequest queryRequest = new QueryRequest(questionEmbedding.vectorAsList(), 10);
QueryResponse queryResponse = chromaClient.queryCollection(this.collectionId, queryRequest);
List<EmbeddingMatch<TextSegment>> matches = toEmbeddingMatches(queryResponse);
List<String> documents = matches.stream()
.map(textSegmentEmbeddingMatch -> textSegmentEmbeddingMatch.embedded().text())
.toList();
String documentString = String.join("\n", documents);
SystemMessage systemMessage = PromptTemplate.from(prompt)
.apply(Map.of("documents", documentString))
.toSystemMessage();
chatMemory.add(systemMessage);
chatMemory.add(UserMessage.from(form.message()));
CompletableFuture<AiMessage> futureAiMessage = new CompletableFuture<>();
StreamingResponseHandler<AiMessage> handler = new StreamingResponseHandler<>() {
@Override
public void onNext(String token) {
messagingTemplate.convertAndSend("/topic/documents/llmStreamingResponse", new LlmResponse(token));
}
@Override
public void onComplete(Response<AiMessage> response) {
futureAiMessage.complete(response.content());
}
@Override
public void onError(Throwable error) {
error.printStackTrace();
}
};
streamingChatModel.generate(chatMemory.messages(), handler);
chatMemory.add(futureAiMessage.get());
Set<String> files = queryResponse.metadatas().get(0).stream()
.map(map -> map.get("file_name"))
.collect(Collectors.toSet());
return new StreamingLlmResponse(chatMemory.messages(), documents, files);
}
public void reset() {
chatMemory.clear();
}
private static List<EmbeddingMatch<TextSegment>> toEmbeddingMatches(QueryResponse queryResponse) {
List<EmbeddingMatch<TextSegment>> embeddingMatches = new ArrayList<>();
for(int i = 0; i < queryResponse.ids().get(0).size(); ++i) {
double score = distanceToScore((Double)((List<?>)queryResponse.distances().get(0)).get(i));
String embeddingId = (String)((List<?>)queryResponse.ids().get(0)).get(i);
Embedding embedding = Embedding.from((List)((List<?>)queryResponse.embeddings().get(0)).get(i));
TextSegment textSegment = toTextSegment(queryResponse, i);
embeddingMatches.add(new EmbeddingMatch<>(score, embeddingId, embedding, textSegment));
}
return embeddingMatches;
}
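    // Chroma reports cosine distances in [0, 2]; map them to a similarity score in [0, 1]
    // via score = 1 - distance / 2.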
private static double distanceToScore(double distance) {
return 1.0 - distance / 2.0;
}
private static TextSegment toTextSegment(QueryResponse queryResponse, int i) {
String text = (String)((List<?>)queryResponse.documents().get(0)).get(i);
Map<String, String> metadata = (Map)((List<?>)queryResponse.metadatas().get(0)).get(i);
return text == null ? null : TextSegment.from(text, metadata == null ? new Metadata() : new Metadata(metadata));
}
} | [
"dev.langchain4j.model.input.PromptTemplate.from"
] | [((3866, 3988), 'dev.langchain4j.model.input.PromptTemplate.from'), ((3866, 3953), 'dev.langchain4j.model.input.PromptTemplate.from')] |
package com.kchandrakant;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.input.Prompt;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiTokenizer;
import dev.langchain4j.store.embedding.EmbeddingMatch;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import java.io.File;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static dev.langchain4j.data.document.FileSystemDocumentLoader.loadDocument;
import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO;
import static java.time.Duration.ofSeconds;
import static java.util.stream.Collectors.joining;
public class ChatWithDocuments {
public static void main(String[] args) {
// Load the document that includes the information you'd like to "chat" about with the model.
Document document = loadDocument(toPath("src/main/resources/example-files/simpson's_adventures.txt"));
// Split document into segments 100 tokens each
DocumentSplitter splitter = DocumentSplitters.recursive(
100,
0,
new OpenAiTokenizer(GPT_3_5_TURBO)
);
List<TextSegment> segments = splitter.split(document);
// Embed segments (convert them into vectors that represent the meaning) using embedding model
EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel();
List<Embedding> embeddings = embeddingModel.embedAll(segments).content();
// Store embeddings into embedding store for further search / retrieval
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
embeddingStore.addAll(embeddings, segments);
// Specify the question you want to ask the model
String question = "Who is Simpson?";
// Embed the question
Embedding questionEmbedding = embeddingModel.embed(question).content();
// Find relevant embeddings in embedding store by semantic similarity
// You can play with parameters below to find a sweet spot for your specific use case
int maxResults = 3;
double minScore = 0.7;
List<EmbeddingMatch<TextSegment>> relevantEmbeddings
= embeddingStore.findRelevant(questionEmbedding, maxResults, minScore);
// Create a prompt for the model that includes question and relevant embeddings
PromptTemplate promptTemplate = PromptTemplate.from(
"Answer the following question to the best of your ability:\n"
+ "\n"
+ "Question:\n"
+ "{{question}}\n"
+ "\n"
+ "Base your answer on the following information:\n"
+ "{{information}}");
String information = relevantEmbeddings.stream()
.map(match -> match.embedded().text())
.collect(joining("\n\n"));
Map<String, Object> variables = new HashMap<>();
variables.put("question", question);
variables.put("information", information);
Prompt prompt = promptTemplate.apply(variables);
// Send the prompt to the OpenAI chat model
ChatLanguageModel chatModel = OpenAiChatModel.builder()
.apiKey(ApiKeys.OPENAI_API_KEY)
.timeout(ofSeconds(60))
.build();
AiMessage aiMessage = chatModel.generate(prompt.toUserMessage()).content();
// See an answer from the model
String answer = aiMessage.text();
        System.out.println(answer); // A description of Simpson, drawn from simpson's_adventures.txt
}
private static Path toPath(String fileName) {
try {
//URL fileUrl = ChatWithDocuments.class.getResource(fileName);
            URL fileUrl = new File(fileName).toURI().toURL();
            System.out.println(fileUrl);
return Paths.get(fileUrl.toURI());
} catch (URISyntaxException | MalformedURLException e) {
throw new RuntimeException(e);
}
}
} | [
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((4007, 4145), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((4007, 4120), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((4007, 4080), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')] |
import org.testcontainers.containers.PostgreSQLContainer;
import org.testcontainers.utility.DockerImageName;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.store.embedding.EmbeddingMatch;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore;
import java.util.List;
public class PGVectorEmbeddingStoreExample {
public static void main(String[] args) {
PostgreSQLContainer<?> postgreSQLContainer = new PostgreSQLContainer<>(
DockerImageName.parse("ankane/pgvector:v0.5.1").asCompatibleSubstituteFor("postgres"));
postgreSQLContainer.start();
EmbeddingStore<TextSegment> embeddingStore = PgVectorEmbeddingStore.builder()
.host(postgreSQLContainer.getHost())
.port(postgreSQLContainer.getFirstMappedPort())
.database(postgreSQLContainer.getDatabaseName())
.user(postgreSQLContainer.getUsername())
.password(postgreSQLContainer.getPassword())
.table("test")
.dimension(384)
.build();
EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel();
TextSegment segment1 = TextSegment.from("I like football.");
Embedding embedding1 = embeddingModel.embed(segment1).content();
embeddingStore.add(embedding1, segment1);
TextSegment segment2 = TextSegment.from("The weather is good today.");
Embedding embedding2 = embeddingModel.embed(segment2).content();
embeddingStore.add(embedding2, segment2);
Embedding queryEmbedding = embeddingModel.embed("What is your favourite sport?").content();
List<EmbeddingMatch<TextSegment>> relevant = embeddingStore.findRelevant(queryEmbedding, 1);
EmbeddingMatch<TextSegment> embeddingMatch = relevant.get(0);
System.out.println(embeddingMatch.score()); // 0.8144288659095
System.out.println(embeddingMatch.embedded().text()); // I like football.
postgreSQLContainer.stop();
}
}
| [
"dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder"
] | [((744, 829), 'org.testcontainers.utility.DockerImageName.parse'), ((926, 1354), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((926, 1328), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((926, 1295), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((926, 1263), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((926, 1201), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((926, 1143), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((926, 1077), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder'), ((926, 1012), 'dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore.builder')] |
import dev.langchain4j.code.Judge0JavaScriptExecutionTool;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.service.AiServices;
import static java.time.Duration.ofSeconds;
public class ServiceWithDynamicToolsExample {
interface Assistant {
String chat(String message);
}
public static void main(String[] args) {
Judge0JavaScriptExecutionTool judge0Tool = new Judge0JavaScriptExecutionTool(ApiKeys.RAPID_API_KEY);
ChatLanguageModel chatLanguageModel = OpenAiChatModel.builder()
.apiKey(ApiKeys.OPENAI_API_KEY)
.temperature(0.0)
.timeout(ofSeconds(60))
.build();
Assistant assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(chatLanguageModel)
.chatMemory(MessageWindowChatMemory.withMaxMessages(20))
.tools(judge0Tool)
.build();
interact(assistant, "What is the square root of 49506838032859?");
interact(assistant, "Capitalize every third letter: abcabc");
interact(assistant, "What is the number of hours between 17:00 on 21 Feb 1988 and 04:00 on 12 Apr 2014?");
}
private static void interact(Assistant assistant, String userMessage) {
System.out.println("[User]: " + userMessage);
String answer = assistant.chat(userMessage);
System.out.println("[Assistant]: " + answer);
System.out.println();
System.out.println();
}
}
| [
"dev.langchain4j.service.AiServices.builder",
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((634, 806), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((634, 781), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((634, 741), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((634, 707), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((839, 1061), 'dev.langchain4j.service.AiServices.builder'), ((839, 1036), 'dev.langchain4j.service.AiServices.builder'), ((839, 1001), 'dev.langchain4j.service.AiServices.builder'), ((839, 928), 'dev.langchain4j.service.AiServices.builder')] |
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.MemoryId;
import dev.langchain4j.service.UserMessage;
public class ServiceWithMemoryForEachUserExample {
interface Assistant {
String chat(@MemoryId int memoryId, @UserMessage String userMessage);
}
public static void main(String[] args) {
Assistant assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY))
.chatMemoryProvider(memoryId -> MessageWindowChatMemory.withMaxMessages(10))
.build();
System.out.println(assistant.chat(1, "Hello, my name is Klaus"));
// Hi Klaus! How can I assist you today?
System.out.println(assistant.chat(2, "Hello, my name is Francine"));
// Hello Francine! How can I assist you today?
System.out.println(assistant.chat(1, "What is my name?"));
// Your name is Klaus.
System.out.println(assistant.chat(2, "What is my name?"));
// Your name is Francine.
}
} | [
"dev.langchain4j.service.AiServices.builder"
] | [((482, 722), 'dev.langchain4j.service.AiServices.builder'), ((482, 697), 'dev.langchain4j.service.AiServices.builder'), ((482, 604), 'dev.langchain4j.service.AiServices.builder')] |
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.store.embedding.EmbeddingMatch;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.chroma.ChromaEmbeddingStore;
import java.util.List;
import static dev.langchain4j.internal.Utils.randomUUID;
public class ChromaEmbeddingStoreExample {
/**
* To run this example, ensure you have Chroma running locally. If not, then:
* - Execute "docker pull ghcr.io/chroma-core/chroma:0.4.6"
* - Execute "docker run -d -p 8000:8000 ghcr.io/chroma-core/chroma:0.4.6"
* - Wait until Chroma is ready to serve (may take a few minutes)
*/
public static void main(String[] args) {
EmbeddingStore<TextSegment> embeddingStore = ChromaEmbeddingStore.builder()
.baseUrl("http://localhost:8000")
.collectionName(randomUUID())
.build();
EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel();
TextSegment segment1 = TextSegment.from("I like football.");
Embedding embedding1 = embeddingModel.embed(segment1).content();
embeddingStore.add(embedding1, segment1);
TextSegment segment2 = TextSegment.from("The weather is good today.");
Embedding embedding2 = embeddingModel.embed(segment2).content();
embeddingStore.add(embedding2, segment2);
Embedding queryEmbedding = embeddingModel.embed("What is your favourite sport?").content();
List<EmbeddingMatch<TextSegment>> relevant = embeddingStore.findRelevant(queryEmbedding, 1);
EmbeddingMatch<TextSegment> embeddingMatch = relevant.get(0);
System.out.println(embeddingMatch.score()); // 0.8144288493114709
System.out.println(embeddingMatch.embedded().text()); // I like football.
}
} | [
"dev.langchain4j.store.embedding.chroma.ChromaEmbeddingStore.builder"
] | [((937, 1088), 'dev.langchain4j.store.embedding.chroma.ChromaEmbeddingStore.builder'), ((937, 1063), 'dev.langchain4j.store.embedding.chroma.ChromaEmbeddingStore.builder'), ((937, 1017), 'dev.langchain4j.store.embedding.chroma.ChromaEmbeddingStore.builder')] |
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.store.memory.chat.ChatMemoryStore;
import org.mapdb.DB;
import org.mapdb.DBMaker;
import java.util.List;
import java.util.Map;
import static dev.langchain4j.data.message.ChatMessageDeserializer.messagesFromJson;
import static dev.langchain4j.data.message.ChatMessageSerializer.messagesToJson;
import static org.mapdb.Serializer.STRING;
public class ServiceWithPersistentMemoryExample {
interface Assistant {
String chat(String message);
}
public static void main(String[] args) {
ChatMemory chatMemory = MessageWindowChatMemory.builder()
.maxMessages(10)
.chatMemoryStore(new PersistentChatMemoryStore())
.build();
Assistant assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY))
.chatMemory(chatMemory)
.build();
String answer = assistant.chat("Hello! My name is Klaus.");
System.out.println(answer); // Hello Klaus! How can I assist you today?
// Now, comment out the two lines above, uncomment the two lines below, and run again.
// String answerWithName = assistant.chat("What is my name?");
// System.out.println(answerWithName); // Your name is Klaus.
}
// You can create your own implementation of ChatMemoryStore and store chat memory whenever you'd like
static class PersistentChatMemoryStore implements ChatMemoryStore {
private final DB db = DBMaker.fileDB("chat-memory.db").transactionEnable().make();
private final Map<String, String> map = db.hashMap("messages", STRING, STRING).createOrOpen();
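        // MapDB persists this map in chat-memory.db; writes become durable only after db.commit().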
@Override
public List<ChatMessage> getMessages(Object memoryId) {
String json = map.get((String) memoryId);
return messagesFromJson(json);
}
@Override
public void updateMessages(Object memoryId, List<ChatMessage> messages) {
String json = messagesToJson(messages);
map.put((String) memoryId, json);
db.commit();
}
@Override
public void deleteMessages(Object memoryId) {
map.remove((String) memoryId);
db.commit();
}
}
}
| [
"dev.langchain4j.service.AiServices.builder",
"dev.langchain4j.memory.chat.MessageWindowChatMemory.builder"
] | [((809, 966), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((809, 941), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((809, 875), 'dev.langchain4j.memory.chat.MessageWindowChatMemory.builder'), ((999, 1186), 'dev.langchain4j.service.AiServices.builder'), ((999, 1161), 'dev.langchain4j.service.AiServices.builder'), ((999, 1121), 'dev.langchain4j.service.AiServices.builder'), ((1792, 1851), 'org.mapdb.DBMaker.fileDB'), ((1792, 1844), 'org.mapdb.DBMaker.fileDB')] |
/*
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package services.ai;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ImageContent;
import dev.langchain4j.data.message.SystemMessage;
import dev.langchain4j.data.message.TextContent;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel;
import dev.langchain4j.service.AiServices;
import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
@Service
public class VertexAIClient {
private static final Logger logger = LoggerFactory.getLogger(VertexAIClient.class);
@Value("${spring.ai.vertex.ai.gemini.project-id}")
private String project;
@Value("${spring.ai.vertex.ai.gemini.location}")
private String location;
public String promptOnImage(String prompt,
String bucketName,
String fileName) throws IOException {
long start = System.currentTimeMillis();
// bucket where image has been uploaded
String imageURL = String.format("gs://%s/%s",bucketName, fileName);
UserMessage userMessage = UserMessage.from(
// ImageContent.from(Base64.getEncoder().encodeToString(readBytes("https://storage.googleapis.com/vision-optimize-serverless-apps/TheJungleBook.jpg")), "image/jpeg"),
ImageContent.from(imageURL),
TextContent.from(prompt)
);
ChatLanguageModel visionModel = VertexAiGeminiChatModel.builder()
.project(project)
.location(location)
.modelName(VertexModels.GEMINI_PRO_VISION)
.build();
Response<AiMessage> multiModalResponse = visionModel.generate(userMessage);
String response = multiModalResponse.content().text();
logger.info("Multi-modal response: " + response);
// response from Vertex is in Markdown, remove annotations
response = response.replaceAll("```json", "").replaceAll("```", "").replace("'", "\"");
logger.info("Elapsed time (chat model): " + (System.currentTimeMillis() - start) + "ms");
// return the response in String format, extract values in caller
return response;
}
public String promptModel(String prompt) {
long start = System.currentTimeMillis();
logger.info("Chat model: " + prompt);
ChatLanguageModel model = VertexAiGeminiChatModel.builder()
.project(project)
.location(location)
.modelName(VertexModels.GEMINI_PRO)
.build();
// prompt Chat model
String output = model.generate(prompt);
logger.info("Elapsed time (chat model, with SpringAI): " + (System.currentTimeMillis() - start) + "ms");
logger.info("Chat Model output: " + output);
// return model response in String format
return output;
}
interface Assistant {
String chat(UserMessage userMessage);
}
public String promptModelwithFunctionCalls(SystemMessage systemMessage,
UserMessage userMessage,
Object function) {
long start = System.currentTimeMillis();
ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10);
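        // sliding window: only the 10 most recent messages are kept and sent to the model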
// chatMemory.add(systemMessage);
ChatLanguageModel model = VertexAiGeminiChatModel.builder()
.project(project)
.location(location)
.modelName(VertexModels.GEMINI_PRO)
.build();
Assistant assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(model)
.chatMemory(chatMemory)
.tools(function)
.build();
String output = assistant.chat(userMessage);
logger.info("Elapsed time (chat model, with Langchain4J): " + (System.currentTimeMillis() - start) + "ms");
logger.info("Chat Model output with Function Call: " + output);
// return model response in String format
return output;
}
}
| [
"dev.langchain4j.service.AiServices.builder",
"dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder"
] | [((2356, 2527), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((2356, 2506), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((2356, 2451), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((2356, 2419), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((3282, 3446), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((3282, 3425), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((3282, 3377), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((3282, 3345), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((4282, 4446), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((4282, 4425), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((4282, 4377), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((4282, 4345), 'dev.langchain4j.model.vertexai.VertexAiGeminiChatModel.builder'), ((4479, 4638), 'dev.langchain4j.service.AiServices.builder'), ((4479, 4617), 'dev.langchain4j.service.AiServices.builder'), ((4479, 4588), 'dev.langchain4j.service.AiServices.builder'), ((4479, 4552), 'dev.langchain4j.service.AiServices.builder')] |
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.model.StreamingResponseHandler;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import java.util.ArrayList;
import java.util.List;
import static java.time.Duration.ofSeconds;
public class _06_FewShot {
public static void main(String[] args) {
OpenAiStreamingChatModel model = OpenAiStreamingChatModel.builder()
.apiKey(ApiKeys.OPENAI_API_KEY)
.timeout(ofSeconds(100))
.build();
List<ChatMessage> fewShotHistory = new ArrayList<>();
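        // Few-shot prompting: seed the history with example (user message -> ideal reply) pairs
        // so the model imitates the "Action: ... / Reply: ..." format for the real input below.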
// Adding positive feedback example to history
fewShotHistory.add(UserMessage.from(
"I love the new update! The interface is very user-friendly and the new features are amazing!"));
fewShotHistory.add(AiMessage.from(
"Action: forward input to positive feedback storage\nReply: Thank you very much for this great feedback! We have transmitted your message to our product development team who will surely be very happy to hear this. We hope you continue enjoying using our product."));
// Adding negative feedback example to history
fewShotHistory.add(UserMessage
.from("I am facing frequent crashes after the new update on my Android device."));
fewShotHistory.add(AiMessage.from(
"Action: open new ticket - crash after update Android\nReply: We are so sorry to hear about the issues you are facing. We have reported the problem to our development team and will make sure this issue is addressed as fast as possible. We will send you an email when the fix is done, and we are always at your service for any further assistance you may need."));
// Adding another positive feedback example to history
fewShotHistory.add(UserMessage
.from("Your app has made my daily tasks so much easier! Kudos to the team!"));
fewShotHistory.add(AiMessage.from(
"Action: forward input to positive feedback storage\nReply: Thank you so much for your kind words! We are thrilled to hear that our app is making your daily tasks easier. Your feedback has been shared with our team. We hope you continue to enjoy using our app!"));
// Adding another negative feedback example to history
fewShotHistory.add(UserMessage
.from("The new feature is not working as expected. It’s causing data loss."));
fewShotHistory.add(AiMessage.from(
"Action: open new ticket - data loss by new feature\nReply:We apologize for the inconvenience caused. Your feedback is crucial to us, and we have reported this issue to our technical team. They are working on it on priority. We will keep you updated on the progress and notify you once the issue is resolved. Thank you for your patience and support."));
// Adding real user's message
ChatMessage customerComplaint = UserMessage
.from("How can your app be so slow? Please do something about it!");
fewShotHistory.add(customerComplaint);
System.out.println("[User]: " + customerComplaint.text());
System.out.print("[LLM]: ");
model.generate(fewShotHistory, new StreamingResponseHandler<AiMessage>() {
@Override
public void onNext(String token) {
System.out.print(token);
}
@Override
            public void onError(Throwable throwable) {
                throwable.printStackTrace(); // surface streaming errors instead of silently ignoring them
            }
});
// Extract reply and send to customer
// Perform necessary action in back-end
}
}
| [
"dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder"
] | [((475, 623), 'dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder'), ((475, 598), 'dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder'), ((475, 557), 'dev.langchain4j.model.openai.OpenAiStreamingChatModel.builder')] |
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.input.Prompt;
import dev.langchain4j.model.input.structured.StructuredPrompt;
import dev.langchain4j.model.input.structured.StructuredPromptProcessor;
import dev.langchain4j.model.openai.OpenAiChatModel;
import java.util.List;
import static java.time.Duration.ofSeconds;
import static java.util.Arrays.asList;
public class StructuredPromptTemplateExamples {
static ChatLanguageModel model = OpenAiChatModel.builder()
.apiKey(ApiKeys.OPENAI_API_KEY)
.timeout(ofSeconds(60))
.build();
static class Simple_Structured_Prompt_Example {
@StructuredPrompt("Create a recipe of a {{dish}} that can be prepared using only {{ingredients}}")
static class CreateRecipePrompt {
private String dish;
private List<String> ingredients;
}
public static void main(String[] args) {
CreateRecipePrompt createRecipePrompt = new CreateRecipePrompt();
createRecipePrompt.dish = "salad";
createRecipePrompt.ingredients = asList("cucumber", "tomato", "feta", "onion", "olives");
Prompt prompt = StructuredPromptProcessor.toPrompt(createRecipePrompt);
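            // Rendered prompt text (approximately):
            // "Create a recipe of a salad that can be prepared using only [cucumber, tomato, feta, onion, olives]"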
AiMessage aiMessage = model.generate(prompt.toUserMessage()).content();
System.out.println(aiMessage.text());
}
}
static class Multi_Line_Structured_Prompt_Example {
@StructuredPrompt({
"Create a recipe of a {{dish}} that can be prepared using only {{ingredients}}.",
"Structure your answer in the following way:",
"Recipe name: ...",
"Description: ...",
"Preparation time: ...",
"Required ingredients:",
"- ...",
"- ...",
"Instructions:",
"- ...",
"- ..."
})
static class CreateRecipePrompt {
private String dish;
private List<String> ingredients;
}
public static void main(String[] args) {
CreateRecipePrompt createRecipePrompt = new CreateRecipePrompt();
createRecipePrompt.dish = "salad";
createRecipePrompt.ingredients = asList("cucumber", "tomato", "feta", "onion", "olives");
Prompt prompt = StructuredPromptProcessor.toPrompt(createRecipePrompt);
AiMessage aiMessage = model.generate(prompt.toUserMessage()).content();
System.out.println(aiMessage.text());
}
}
}
| [
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((528, 654), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((528, 633), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((528, 597), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')] |
package zin.rashidi.boot.langchain4j.history;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.rag.content.retriever.ContentRetriever;
import dev.langchain4j.retriever.EmbeddingStoreRetriever;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.env.Environment;
import static dev.langchain4j.memory.chat.MessageWindowChatMemory.withMaxMessages;
/**
* @author Rashidi Zin
*/
@Configuration
class HistorianConfiguration {
@Bean
Historian historian(ChatLanguageModel model, ContentRetriever retriever, HistorianTool tool) {
return AiServices.builder(Historian.class)
.chatLanguageModel(model)
.chatMemory(withMaxMessages(10))
.contentRetriever(retriever)
.tools(tool)
.build();
}
@Bean
ContentRetriever retriever(EmbeddingStore<TextSegment> embeddingStore) {
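        // positional arguments: return at most 1 match with a minimum similarity score of 0.6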
return EmbeddingStoreRetriever.from(embeddingStore, new AllMiniLmL6V2EmbeddingModel(), 1, 0.6)
.toContentRetriever();
}
@Bean
EmbeddingStore<TextSegment> embeddingStore(Environment environment) {
return ElasticsearchEmbeddingStore.builder()
.serverUrl(environment.getProperty("app.elasticsearch.uri"))
.indexName("history")
.build();
}
}
| [
"dev.langchain4j.service.AiServices.builder",
"dev.langchain4j.retriever.EmbeddingStoreRetriever.from",
"dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder"
] | [((967, 1192), 'dev.langchain4j.service.AiServices.builder'), ((967, 1167), 'dev.langchain4j.service.AiServices.builder'), ((967, 1138), 'dev.langchain4j.service.AiServices.builder'), ((967, 1093), 'dev.langchain4j.service.AiServices.builder'), ((967, 1044), 'dev.langchain4j.service.AiServices.builder'), ((1303, 1428), 'dev.langchain4j.retriever.EmbeddingStoreRetriever.from'), ((1536, 1713), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder'), ((1536, 1688), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder'), ((1536, 1650), 'dev.langchain4j.store.embedding.elasticsearch.ElasticsearchEmbeddingStore.builder')] |
import dev.langchain4j.model.openai.OpenAiChatModel;
import java.net.InetSocketAddress;
import java.net.Proxy;
import static java.net.Proxy.Type.HTTP;
public class ProxyExample {
public static void main(String[] args) {
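        // Routes all OpenAI API traffic through the given HTTP proxy (address is an example).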
OpenAiChatModel model = OpenAiChatModel.builder()
.apiKey(ApiKeys.OPENAI_API_KEY)
.proxy(new Proxy(HTTP, new InetSocketAddress("39.175.77.7", 30001)))
.build();
String answer = model.generate("hello");
System.out.println(answer);
}
}
| [
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((261, 444), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((261, 419), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((261, 334), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')] |
import dev.langchain4j.chain.ConversationalChain;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.memory.chat.TokenWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiTokenizer;
import java.io.IOException;
import static dev.langchain4j.data.message.UserMessage.userMessage;
import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO;
public class ChatMemoryExamples {
// See also ServiceWithMemoryExample and ServiceWithMemoryForEachUserExample
public static class ConversationalChain_Example {
public static void main(String[] args) throws IOException {
ConversationalChain chain = ConversationalChain.builder()
.chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY))
// .chatMemory() // you can override default chat memory
.build();
String answer = chain.execute("Hello, my name is Klaus");
System.out.println(answer); // Hello Klaus! How can I assist you today?
String answerWithName = chain.execute("What is my name?");
System.out.println(answerWithName); // Your name is Klaus.
}
}
public static class If_You_Need_More_Control {
public static void main(String[] args) {
ChatLanguageModel model = OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY);
ChatMemory chatMemory = TokenWindowChatMemory.withMaxTokens(300, new OpenAiTokenizer(GPT_3_5_TURBO));
// You have full control over the chat memory.
// You can decide if you want to add a particular message to the memory
// (e.g. you might not want to store few-shot examples to save on tokens).
// You can process/modify the message before saving if required.
chatMemory.add(userMessage("Hello, my name is Klaus"));
AiMessage answer = model.generate(chatMemory.messages()).content();
System.out.println(answer.text()); // Hello Klaus! How can I assist you today?
chatMemory.add(answer);
chatMemory.add(userMessage("What is my name?"));
AiMessage answerWithName = model.generate(chatMemory.messages()).content();
System.out.println(answerWithName.text()); // Your name is Klaus.
chatMemory.add(answerWithName);
}
}
}
| [
"dev.langchain4j.chain.ConversationalChain.builder"
] | [((810, 1036), 'dev.langchain4j.chain.ConversationalChain.builder'), ((810, 930), 'dev.langchain4j.chain.ConversationalChain.builder')] |
package me.nzuguem.bot.configurations.llm;
import dev.langchain4j.rag.DefaultRetrievalAugmentor;
import dev.langchain4j.rag.RetrievalAugmentor;
import jakarta.enterprise.context.ApplicationScoped;
import java.util.function.Supplier;
@ApplicationScoped
public class GithubAppRetrievalAugmentor implements Supplier<RetrievalAugmentor> {
private final GithubAppContentRetriever retriever;
public GithubAppRetrievalAugmentor(GithubAppContentRetriever retriever) {
this.retriever = retriever;
}
@Override
public RetrievalAugmentor get() {
return DefaultRetrievalAugmentor.builder()
.contentRetriever(this.retriever)
.build();
}
}
| [
"dev.langchain4j.rag.DefaultRetrievalAugmentor.builder"
] | [((583, 693), 'dev.langchain4j.rag.DefaultRetrievalAugmentor.builder'), ((583, 668), 'dev.langchain4j.rag.DefaultRetrievalAugmentor.builder')] |
import dev.langchain4j.chain.ConversationalRetrievalChain;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.parser.TextDocumentParser;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.input.Prompt;
import dev.langchain4j.model.input.PromptTemplate;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiTokenizer;
import dev.langchain4j.retriever.EmbeddingStoreRetriever;
import dev.langchain4j.store.embedding.EmbeddingMatch;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Duration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static dev.langchain4j.data.document.loader.FileSystemDocumentLoader.loadDocument;
import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO;
import static java.util.stream.Collectors.joining;
public class ChatWithDocumentsExamples {
// Please also check ServiceWithRetrieverExample
static class IfYouNeedSimplicity {
public static void main(String[] args) throws Exception {
EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel();
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
.documentSplitter(DocumentSplitters.recursive(300, 0))
.embeddingModel(embeddingModel)
.embeddingStore(embeddingStore)
.build();
Document document = loadDocument(toPath("example-files/story-about-happy-carrot.txt"), new TextDocumentParser());
ingestor.ingest(document);
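            // The chain below retrieves segments relevant to each question and injects them into the prompt.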
ConversationalRetrievalChain chain = ConversationalRetrievalChain.builder()
.chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY))
.retriever(EmbeddingStoreRetriever.from(embeddingStore, embeddingModel))
// .chatMemory() // you can override default chat memory
// .promptTemplate() // you can override default prompt template
.build();
String answer = chain.execute("Who is Charlie?");
System.out.println(answer); // Charlie is a cheerful carrot living in VeggieVille...
}
}
static class If_You_Need_More_Control {
public static void main(String[] args) {
// Load the document that includes the information you'd like to "chat" about with the model.
Document document = loadDocument(toPath("example-files/story-about-happy-carrot.txt"), new TextDocumentParser());
// Split the document into segments of 100 tokens each
DocumentSplitter splitter = DocumentSplitters.recursive(
100,
0,
new OpenAiTokenizer(GPT_3_5_TURBO)
);
List<TextSegment> segments = splitter.split(document);
// Embed the segments (convert them into vectors that capture their meaning) using the embedding model
EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel();
List<Embedding> embeddings = embeddingModel.embedAll(segments).content();
// Store the embeddings in the embedding store for later search / retrieval
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
embeddingStore.addAll(embeddings, segments);
// Specify the question you want to ask the model
String question = "Who is Charlie?";
// Embed the question
Embedding questionEmbedding = embeddingModel.embed(question).content();
// Find the most relevant embeddings in the embedding store by semantic similarity
// Tune the two parameters below to find the sweet spot for your specific use case
int maxResults = 3;
double minScore = 0.7;
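// minScore is a relevance threshold (a normalized similarity score); matches scoring below 0.7 are dropped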
List<EmbeddingMatch<TextSegment>> relevantEmbeddings
= embeddingStore.findRelevant(questionEmbedding, maxResults, minScore);
// Create a prompt for the model that combines the question with the text of the relevant segments
PromptTemplate promptTemplate = PromptTemplate.from(
"Answer the following question to the best of your ability:\n"
+ "\n"
+ "Question:\n"
+ "{{question}}\n"
+ "\n"
+ "Base your answer on the following information:\n"
+ "{{information}}");
String information = relevantEmbeddings.stream()
.map(match -> match.embedded().text())
.collect(joining("\n\n"));
Map<String, Object> variables = new HashMap<>();
variables.put("question", question);
variables.put("information", information);
Prompt prompt = promptTemplate.apply(variables);
// Send the prompt to the OpenAI chat model
ChatLanguageModel chatModel = OpenAiChatModel.builder()
.apiKey(ApiKeys.OPENAI_API_KEY)
.timeout(Duration.ofSeconds(60))
.build();
AiMessage aiMessage = chatModel.generate(prompt.toUserMessage()).content();
// Extract and print the model's answer
String answer = aiMessage.text();
System.out.println(answer); // Charlie is a cheerful carrot living in VeggieVille...
}
}
private static Path toPath(String fileName) {
try {
URL fileUrl = ChatWithDocumentsExamples.class.getResource(fileName);
return Paths.get(fileUrl.toURI());
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
}
}
| [
"dev.langchain4j.chain.ConversationalRetrievalChain.builder",
"dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder",
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((1961, 2201), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1961, 2172), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1961, 2120), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((1961, 2068), 'dev.langchain4j.store.embedding.EmbeddingStoreIngestor.builder'), ((2419, 2832), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2419, 2641), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((2419, 2548), 'dev.langchain4j.chain.ConversationalRetrievalChain.builder'), ((5821, 5980), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5821, 5951), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5821, 5898), 'dev.langchain4j.model.openai.OpenAiChatModel.builder')] |
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.rag.content.retriever.ContentRetriever;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.rag.query.Query;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.MemoryId;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.filter.Filter;
import dev.langchain4j.store.embedding.filter.builder.sql.LanguageModelSqlFilterBuilder;
import dev.langchain4j.store.embedding.filter.builder.sql.TableDefinition;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import org.junit.jupiter.api.Test;
import java.util.function.Function;
import static dev.langchain4j.data.document.Metadata.metadata;
import static dev.langchain4j.store.embedding.filter.MetadataFilterBuilder.metadataKey;
import static org.assertj.core.api.Assertions.assertThat;
class _06_Metadata_Filtering {
/**
* More information can be found here: https://github.com/langchain4j/langchain4j/pull/610
*/
ChatLanguageModel chatLanguageModel = OpenAiChatModel.builder()
.apiKey(System.getenv("OPENAI_API_KEY"))
.build();
EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel();
interface Assistant {
String answer(String query);
}
@Test
void Static_Metadata_Filter_Example() {
// given
TextSegment dogsSegment = TextSegment.from("Article about dogs ...", metadata("animal", "dog"));
TextSegment birdsSegment = TextSegment.from("Article about birds ...", metadata("animal", "bird"));
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
embeddingStore.add(embeddingModel.embed(dogsSegment).content(), dogsSegment);
embeddingStore.add(embeddingModel.embed(birdsSegment).content(), birdsSegment);
// embeddingStore contains segments about both dogs and birds
Filter onlyDogs = metadataKey("animal").isEqualTo("dog");
ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder()
.embeddingStore(embeddingStore)
.embeddingModel(embeddingModel)
.filter(onlyDogs) // by specifying the static filter, we limit the search to segments only about dogs
.build();
Assistant assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(chatLanguageModel)
.contentRetriever(contentRetriever)
.build();
// when
String answer = assistant.answer("Which animal?");
// then
assertThat(answer)
.containsIgnoringCase("dog")
.doesNotContainIgnoringCase("bird");
}
interface PersonalizedAssistant {
String chat(@MemoryId String userId, @dev.langchain4j.service.UserMessage String userMessage);
}
@Test
void Dynamic_Metadata_Filter_Example() {
// given
TextSegment user1Info = TextSegment.from("My favorite color is green", metadata("userId", "1"));
TextSegment user2Info = TextSegment.from("My favorite color is red", metadata("userId", "2"));
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
embeddingStore.add(embeddingModel.embed(user1Info).content(), user1Info);
embeddingStore.add(embeddingModel.embed(user2Info).content(), user2Info);
// embeddingStore contains information about both first and second user
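// The @MemoryId value passed to chat() travels with each query, so the filter can be rebuilt per user on every request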
Function<Query, Filter> filterByUserId =
(query) -> metadataKey("userId").isEqualTo(query.metadata().chatMemoryId().toString());
ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder()
.embeddingStore(embeddingStore)
.embeddingModel(embeddingModel)
// by specifying the dynamic filter, we limit the search to segments that belong only to the current user
.dynamicFilter(filterByUserId)
.build();
PersonalizedAssistant personalizedAssistant = AiServices.builder(PersonalizedAssistant.class)
.chatLanguageModel(chatLanguageModel)
.contentRetriever(contentRetriever)
.build();
// when
String answer1 = personalizedAssistant.chat("1", "Which color would be best for a dress?");
// then
assertThat(answer1)
.containsIgnoringCase("green")
.doesNotContainIgnoringCase("red");
// when
String answer2 = personalizedAssistant.chat("2", "Which color would be best for a dress?");
// then
assertThat(answer2)
.containsIgnoringCase("red")
.doesNotContainIgnoringCase("green");
}
@Test
void LLM_generated_Metadata_Filter_Example() {
// given
TextSegment forrestGump = TextSegment.from("Forrest Gump", metadata("genre", "drama").put("year", 1994));
TextSegment groundhogDay = TextSegment.from("Groundhog Day", metadata("genre", "comedy").put("year", 1993));
TextSegment dieHard = TextSegment.from("Die Hard", metadata("genre", "action").put("year", 1998));
// describe the metadata keys as if they were columns in a SQL table
TableDefinition tableDefinition = TableDefinition.builder()
.name("movies")
.addColumn("genre", "VARCHAR", "one of: [comedy, drama, action]")
.addColumn("year", "INT")
.build();
LanguageModelSqlFilterBuilder sqlFilterBuilder = new LanguageModelSqlFilterBuilder(chatLanguageModel, tableDefinition);
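// The builder prompts the LLM to express the user's query as a SQL WHERE clause over the table above, then parses it into a metadata Filter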
EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
embeddingStore.add(embeddingModel.embed(forrestGump).content(), forrestGump);
embeddingStore.add(embeddingModel.embed(groundhogDay).content(), groundhogDay);
embeddingStore.add(embeddingModel.embed(dieHard).content(), dieHard);
ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder()
.embeddingStore(embeddingStore)
.embeddingModel(embeddingModel)
.dynamicFilter(query -> sqlFilterBuilder.build(query)) // LLM will generate the filter dynamically
.build();
Assistant assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(chatLanguageModel)
.contentRetriever(contentRetriever)
.build();
// when
String answer = assistant.answer("Recommend me a good drama from 90s");
// then
assertThat(answer)
.containsIgnoringCase("Forrest Gump")
.doesNotContainIgnoringCase("Groundhog Day")
.doesNotContainIgnoringCase("Die Hard");
}
}
| [
"dev.langchain4j.store.embedding.filter.builder.sql.TableDefinition.builder",
"dev.langchain4j.service.AiServices.builder",
"dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder",
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((1356, 1455), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1356, 1434), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2328, 2607), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2328, 2498), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2328, 2464), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2328, 2416), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((2640, 2806), 'dev.langchain4j.service.AiServices.builder'), ((2640, 2781), 'dev.langchain4j.service.AiServices.builder'), ((2640, 2729), 'dev.langchain4j.service.AiServices.builder'), ((3993, 4323), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3993, 4298), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3993, 4129), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((3993, 4081), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((4380, 4558), 'dev.langchain4j.service.AiServices.builder'), ((4380, 4533), 'dev.langchain4j.service.AiServices.builder'), ((4380, 4481), 'dev.langchain4j.service.AiServices.builder'), ((5624, 5830), 'dev.langchain4j.store.embedding.filter.builder.sql.TableDefinition.builder'), ((5624, 5805), 'dev.langchain4j.store.embedding.filter.builder.sql.TableDefinition.builder'), ((5624, 5763), 'dev.langchain4j.store.embedding.filter.builder.sql.TableDefinition.builder'), ((5624, 5681), 'dev.langchain4j.store.embedding.filter.builder.sql.TableDefinition.builder'), ((6344, 6620), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((6344, 6551), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((6344, 6480), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((6344, 6432), 'dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever.builder'), ((6653, 6819), 'dev.langchain4j.service.AiServices.builder'), ((6653, 6794), 'dev.langchain4j.service.AiServices.builder'), ((6653, 6742), 'dev.langchain4j.service.AiServices.builder')] |
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiModerationModel;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.Moderate;
import dev.langchain4j.service.ModerationException;
public class ServiceWithAutoModerationExample {
interface Chat {
@Moderate
String chat(String text);
}
public static void main(String[] args) {
OpenAiModerationModel moderationModel = OpenAiModerationModel.withApiKey(ApiKeys.OPENAI_API_KEY);
Chat chat = AiServices.builder(Chat.class)
.chatLanguageModel(OpenAiChatModel.withApiKey(ApiKeys.OPENAI_API_KEY))
.moderationModel(moderationModel)
.build();
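// @Moderate makes AiServices run the moderation model alongside the chat call; flagged input is reported as a ModerationException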
try {
chat.chat("I WILL KILL YOU!!!");
} catch (ModerationException e) {
System.out.println(e.getMessage());
// Text "I WILL KILL YOU!!!" violates content policy
}
}
}
| [
"dev.langchain4j.service.AiServices.builder"
] | [((552, 744), 'dev.langchain4j.service.AiServices.builder'), ((552, 719), 'dev.langchain4j.service.AiServices.builder'), ((552, 669), 'dev.langchain4j.service.AiServices.builder')] |
package dev.langchain4j.data.message;
import dev.langchain4j.agent.tool.ToolExecutionRequest;
import org.assertj.core.api.WithAssertions;
import org.junit.jupiter.api.Test;
class ToolExecutionResultMessageTest implements WithAssertions {
@Test
public void test_methods() {
ToolExecutionResultMessage tm = new ToolExecutionResultMessage("id", "toolName", "text");
assertThat(tm.id()).isEqualTo("id");
assertThat(tm.toolName()).isEqualTo("toolName");
assertThat(tm.text()).isEqualTo("text");
assertThat(tm.type()).isEqualTo(ChatMessageType.TOOL_EXECUTION_RESULT);
assertThat(tm)
.hasToString(
"ToolExecutionResultMessage " +
"{ id = \"id\" toolName = \"toolName\" text = \"text\" }");
}
@Test
public void test_equals_hashCode() {
ToolExecutionResultMessage t1 = new ToolExecutionResultMessage("id", "toolName", "text");
ToolExecutionResultMessage t2 = new ToolExecutionResultMessage("id", "toolName", "text");
ToolExecutionResultMessage t3 = new ToolExecutionResultMessage("foo", "toolName", "text");
ToolExecutionResultMessage t4 = new ToolExecutionResultMessage("foo", "toolName", "text");
assertThat(t1)
.isEqualTo(t1)
.isNotEqualTo(null)
.isNotEqualTo(new Object())
.isEqualTo(t2)
.hasSameHashCodeAs(t2)
.isNotEqualTo(ToolExecutionResultMessage.from(
"changed", "toolName", "text"))
.isNotEqualTo(ToolExecutionResultMessage.from(
"id", "changed", "text"))
.isNotEqualTo(ToolExecutionResultMessage.from(
"id", "toolName", "changed"))
.isNotEqualTo(t3)
.doesNotHaveSameHashCodeAs(t3);
assertThat(t3)
.isEqualTo(t3)
.isEqualTo(t4)
.hasSameHashCodeAs(t4);
}
@Test
public void test_builders() {
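// Each factory method should produce a message equal to one created via the canonical constructor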
ToolExecutionRequest request = ToolExecutionRequest.builder()
.id("id")
.name("toolName")
.arguments("arguments")
.build();
assertThat(new ToolExecutionResultMessage("id", "toolName", "text"))
.isEqualTo(ToolExecutionResultMessage.from("id", "toolName", "text"))
.isEqualTo(ToolExecutionResultMessage.from(request, "text"))
.isEqualTo(ToolExecutionResultMessage.toolExecutionResultMessage(
"id", "toolName", "text"))
.isEqualTo(ToolExecutionResultMessage.toolExecutionResultMessage(request, "text"));
}
} | [
"dev.langchain4j.agent.tool.ToolExecutionRequest.builder"
] | [((2103, 2258), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((2103, 2233), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((2103, 2193), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder'), ((2103, 2159), 'dev.langchain4j.agent.tool.ToolExecutionRequest.builder')] |
package dev.langchain4j.service;
import dev.langchain4j.agent.tool.P;
import dev.langchain4j.agent.tool.Tool;
import dev.langchain4j.agent.tool.ToolExecutionRequest;
import dev.langchain4j.agent.tool.ToolSpecification;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.ToolExecutionResultMessage;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.model.output.TokenUsage;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Spy;
import org.mockito.junit.jupiter.MockitoExtension;
import java.util.Arrays;
import java.util.List;
import static dev.langchain4j.agent.tool.JsonSchemaProperty.*;
import static dev.langchain4j.model.openai.OpenAiChatModelName.GPT_3_5_TURBO_0613;
import static dev.langchain4j.model.output.FinishReason.STOP;
import static dev.langchain4j.service.AiServicesWithToolsIT.TemperatureUnit.Kelvin;
import static java.util.Arrays.asList;
import static java.util.Collections.singletonList;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.data.Percentage.withPercentage;
import static org.mockito.Mockito.*;
@ExtendWith(MockitoExtension.class)
class AiServicesWithToolsIT {
@Spy
ChatLanguageModel chatLanguageModel = OpenAiChatModel.builder()
.baseUrl(System.getenv("OPENAI_BASE_URL"))
.apiKey(System.getenv("OPENAI_API_KEY"))
.organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
.temperature(0.0)
.logRequests(true)
.logResponses(true)
.build();
@AfterEach
void afterEach() {
verifyNoMoreInteractions(chatLanguageModel);
}
interface Assistant {
Response<AiMessage> chat(String userMessage);
}
static class Calculator {
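// The specification that AiServices should derive from the @Tool and @P annotations below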
static ToolSpecification EXPECTED_SPECIFICATION = ToolSpecification.builder()
.name("squareRoot")
.description("calculates the square root of the provided number")
.addParameter("arg0", NUMBER, description("number to operate on"))
.build();
@Tool("calculates the square root of the provided number")
double squareRoot(@P("number to operate on") double number) {
System.out.printf("called squareRoot(%s)%n", number);
return Math.sqrt(number);
}
}
@Test
void should_execute_a_tool_then_answer() {
Calculator calculator = spy(new Calculator());
ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10);
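// AiServices drives the tool loop: the model requests squareRoot, the tool is executed, the result is sent back, and the model produces the final answer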
Assistant assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(chatLanguageModel)
.chatMemory(chatMemory)
.tools(calculator)
.build();
String userMessage = "What is the square root of 485906798473894056 in scientific notation?";
Response<AiMessage> response = assistant.chat(userMessage);
assertThat(response.content().text()).contains("6.97");
TokenUsage tokenUsage = response.tokenUsage();
assertThat(tokenUsage.inputTokenCount()).isEqualTo(72 + 109);
assertThat(tokenUsage.outputTokenCount()).isCloseTo(20 + 31, withPercentage(5));
assertThat(tokenUsage.totalTokenCount())
.isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount());
assertThat(response.finishReason()).isEqualTo(STOP);
verify(calculator).squareRoot(485906798473894056.0);
verifyNoMoreInteractions(calculator);
List<ChatMessage> messages = chatMemory.messages();
assertThat(messages).hasSize(4);
assertThat(messages.get(0)).isInstanceOf(dev.langchain4j.data.message.UserMessage.class);
assertThat(messages.get(0).text()).isEqualTo(userMessage);
AiMessage aiMessage = (AiMessage) messages.get(1);
assertThat(aiMessage.text()).isNull();
assertThat(aiMessage.toolExecutionRequests()).hasSize(1);
ToolExecutionRequest toolExecutionRequest = aiMessage.toolExecutionRequests().get(0);
assertThat(toolExecutionRequest.id()).isNotBlank();
assertThat(toolExecutionRequest.name()).isEqualTo("squareRoot");
assertThat(toolExecutionRequest.arguments())
.isEqualToIgnoringWhitespace("{\"arg0\": 485906798473894056}");
ToolExecutionResultMessage toolExecutionResultMessage = (ToolExecutionResultMessage) messages.get(2);
assertThat(toolExecutionResultMessage.id()).isEqualTo(toolExecutionRequest.id());
assertThat(toolExecutionResultMessage.toolName()).isEqualTo("squareRoot");
assertThat(toolExecutionResultMessage.text()).isEqualTo("6.97070153193991E8");
assertThat(messages.get(3)).isInstanceOf(AiMessage.class);
assertThat(messages.get(3).text()).contains("6.97");
verify(chatLanguageModel).generate(
singletonList(messages.get(0)),
singletonList(Calculator.EXPECTED_SPECIFICATION)
);
verify(chatLanguageModel).generate(
asList(messages.get(0), messages.get(1), messages.get(2)),
singletonList(Calculator.EXPECTED_SPECIFICATION)
);
}
@Test
void should_execute_multiple_tools_sequentially_then_answer() {
ChatLanguageModel chatLanguageModel = spy(OpenAiChatModel.builder()
.baseUrl(System.getenv("OPENAI_BASE_URL"))
.apiKey(System.getenv("OPENAI_API_KEY"))
.organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
.modelName(GPT_3_5_TURBO_0613) // this model can only call tools sequentially
.temperature(0.0)
.logRequests(true)
.logResponses(true)
.build());
Calculator calculator = spy(new Calculator());
ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10);
Assistant assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(chatLanguageModel)
.chatMemory(chatMemory)
.tools(calculator)
.build();
String userMessage = "What is the square root of 485906798473894056 and 97866249624785 in scientific notation?";
Response<AiMessage> response = assistant.chat(userMessage);
assertThat(response.content().text()).contains("6.97", "9.89");
TokenUsage tokenUsage = response.tokenUsage();
assertThat(tokenUsage.inputTokenCount()).isEqualTo(79 + 117 + 152);
assertThat(tokenUsage.outputTokenCount()).isCloseTo(21 + 20 + 53, withPercentage(5));
assertThat(tokenUsage.totalTokenCount())
.isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount());
assertThat(response.finishReason()).isEqualTo(STOP);
verify(calculator).squareRoot(485906798473894056.0);
verify(calculator).squareRoot(97866249624785.0);
verifyNoMoreInteractions(calculator);
List<ChatMessage> messages = chatMemory.messages();
assertThat(messages).hasSize(6);
assertThat(messages.get(0)).isInstanceOf(dev.langchain4j.data.message.UserMessage.class);
assertThat(messages.get(0).text()).isEqualTo(userMessage);
AiMessage aiMessage = (AiMessage) messages.get(1);
assertThat(aiMessage.text()).isNull();
assertThat(aiMessage.toolExecutionRequests()).hasSize(1);
ToolExecutionRequest toolExecutionRequest = aiMessage.toolExecutionRequests().get(0);
assertThat(toolExecutionRequest.id()).isNotBlank();
assertThat(toolExecutionRequest.name()).isEqualTo("squareRoot");
assertThat(toolExecutionRequest.arguments())
.isEqualToIgnoringWhitespace("{\"arg0\": 485906798473894056}");
ToolExecutionResultMessage toolExecutionResultMessage = (ToolExecutionResultMessage) messages.get(2);
assertThat(toolExecutionResultMessage.id()).isEqualTo(toolExecutionRequest.id());
assertThat(toolExecutionResultMessage.toolName()).isEqualTo("squareRoot");
assertThat(toolExecutionResultMessage.text()).isEqualTo("6.97070153193991E8");
AiMessage secondAiMessage = (AiMessage) messages.get(3);
assertThat(secondAiMessage.text()).isNull();
assertThat(secondAiMessage.toolExecutionRequests()).hasSize(1);
ToolExecutionRequest secondToolExecutionRequest = secondAiMessage.toolExecutionRequests().get(0);
assertThat(secondToolExecutionRequest.id()).isNotBlank();
assertThat(secondToolExecutionRequest.name()).isEqualTo("squareRoot");
assertThat(secondToolExecutionRequest.arguments())
.isEqualToIgnoringWhitespace("{\"arg0\": 97866249624785}");
ToolExecutionResultMessage secondToolExecutionResultMessage = (ToolExecutionResultMessage) messages.get(4);
assertThat(secondToolExecutionResultMessage.id()).isEqualTo(secondToolExecutionRequest.id());
assertThat(secondToolExecutionResultMessage.toolName()).isEqualTo("squareRoot");
assertThat(secondToolExecutionResultMessage.text()).isEqualTo("9892737.215997653");
assertThat(messages.get(5)).isInstanceOf(AiMessage.class);
assertThat(messages.get(5).text()).contains("6.97", "9.89");
verify(chatLanguageModel).generate(
singletonList(messages.get(0)),
singletonList(Calculator.EXPECTED_SPECIFICATION)
);
verify(chatLanguageModel).generate(
asList(messages.get(0), messages.get(1), messages.get(2)),
singletonList(Calculator.EXPECTED_SPECIFICATION)
);
verify(chatLanguageModel).generate(
asList(messages.get(0), messages.get(1), messages.get(2), messages.get(3), messages.get(4)),
singletonList(Calculator.EXPECTED_SPECIFICATION)
);
}
@Test
void should_execute_multiple_tools_in_parallel_then_answer() {
Calculator calculator = spy(new Calculator());
ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10);
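// With parallel tool calling, the model may request both square roots in a single response; both results are then sent back to it together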
Assistant assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(chatLanguageModel)
.chatMemory(chatMemory)
.tools(calculator)
.build();
String userMessage = "What is the square root of 485906798473894056 and 97866249624785 in scientific notation?";
Response<AiMessage> response = assistant.chat(userMessage);
assertThat(response.content().text()).contains("6.97", "9.89");
TokenUsage tokenUsage = response.tokenUsage();
assertThat(tokenUsage.inputTokenCount()).isEqualTo(79 + 160);
assertThat(tokenUsage.outputTokenCount()).isCloseTo(54 + 58, withPercentage(5));
assertThat(tokenUsage.totalTokenCount())
.isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount());
assertThat(response.finishReason()).isEqualTo(STOP);
verify(calculator).squareRoot(485906798473894056.0);
verify(calculator).squareRoot(97866249624785.0);
verifyNoMoreInteractions(calculator);
List<ChatMessage> messages = chatMemory.messages();
assertThat(messages).hasSize(5);
assertThat(messages.get(0)).isInstanceOf(dev.langchain4j.data.message.UserMessage.class);
assertThat(messages.get(0).text()).isEqualTo(userMessage);
AiMessage aiMessage = (AiMessage) messages.get(1);
assertThat(aiMessage.text()).isNull();
assertThat(aiMessage.toolExecutionRequests()).hasSize(2);
ToolExecutionRequest firstToolExecutionRequest = aiMessage.toolExecutionRequests().get(0);
assertThat(firstToolExecutionRequest.id()).isNotBlank();
assertThat(firstToolExecutionRequest.name()).isEqualTo("squareRoot");
assertThat(firstToolExecutionRequest.arguments())
.isEqualToIgnoringWhitespace("{\"arg0\": 485906798473894056}");
ToolExecutionRequest secondToolExecutionRequest = aiMessage.toolExecutionRequests().get(1);
assertThat(secondToolExecutionRequest.id()).isNotBlank();
assertThat(secondToolExecutionRequest.name()).isEqualTo("squareRoot");
assertThat(secondToolExecutionRequest.arguments())
.isEqualToIgnoringWhitespace("{\"arg0\": 97866249624785}");
ToolExecutionResultMessage firstToolExecutionResultMessage = (ToolExecutionResultMessage) messages.get(2);
assertThat(firstToolExecutionResultMessage.id()).isEqualTo(firstToolExecutionRequest.id());
assertThat(firstToolExecutionResultMessage.toolName()).isEqualTo("squareRoot");
assertThat(firstToolExecutionResultMessage.text()).isEqualTo("6.97070153193991E8");
ToolExecutionResultMessage secondToolExecutionResultMessage = (ToolExecutionResultMessage) messages.get(3);
assertThat(secondToolExecutionResultMessage.id()).isEqualTo(secondToolExecutionRequest.id());
assertThat(secondToolExecutionResultMessage.toolName()).isEqualTo("squareRoot");
assertThat(secondToolExecutionResultMessage.text()).isEqualTo("9892737.215997653");
assertThat(messages.get(4)).isInstanceOf(AiMessage.class);
assertThat(messages.get(4).text()).contains("6.97", "9.89");
verify(chatLanguageModel).generate(
singletonList(messages.get(0)),
singletonList(Calculator.EXPECTED_SPECIFICATION)
);
verify(chatLanguageModel).generate(
asList(messages.get(0), messages.get(1), messages.get(2), messages.get(3)),
singletonList(Calculator.EXPECTED_SPECIFICATION)
);
}
static class StringListProcessor {
static ToolSpecification EXPECTED_SPECIFICATION = ToolSpecification.builder()
.name("processStrings")
.description("Processes list of strings")
.addParameter("arg0", ARRAY, items(STRING), description("List of strings to process"))
.build();
@Tool("Processes list of strings")
void processStrings(@P("List of strings to process") List<String> strings) {
System.out.printf("called processStrings(%s)%n", strings);
}
}
@Test
void should_use_tool_with_List_of_Strings_parameter() {
StringListProcessor stringListProcessor = spy(new StringListProcessor());
ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10);
Assistant assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(chatLanguageModel)
.chatMemory(chatMemory)
.tools(stringListProcessor)
.build();
String userMessage = "Process strings 'cat' and 'dog' together, do not separate them!";
// when
assistant.chat(userMessage);
// then
verify(stringListProcessor).processStrings(asList("cat", "dog"));
verifyNoMoreInteractions(stringListProcessor);
List<ChatMessage> messages = chatMemory.messages();
verify(chatLanguageModel).generate(
singletonList(messages.get(0)),
singletonList(StringListProcessor.EXPECTED_SPECIFICATION)
);
verify(chatLanguageModel).generate(
asList(messages.get(0), messages.get(1), messages.get(2)),
singletonList(StringListProcessor.EXPECTED_SPECIFICATION)
);
}
static class IntegerListProcessor {
static ToolSpecification EXPECTED_SPECIFICATION = ToolSpecification.builder()
.name("processIntegers")
.description("Processes list of integers")
.addParameter("arg0", ARRAY, items(INTEGER), description("List of integers to process"))
.build();
@Tool("Processes list of integers")
void processIntegers(@P("List of integers to process") List<Integer> integers) {
System.out.printf("called processIntegers(%s)%n", integers);
}
}
@Test
@Disabled
// TODO fix: should automatically convert List<Double> into List<Integer>
void should_use_tool_with_List_of_Integers_parameter() {
IntegerListProcessor integerListProcessor = spy(new IntegerListProcessor());
ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10);
Assistant assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(chatLanguageModel)
.chatMemory(chatMemory)
.tools(integerListProcessor)
.build();
String userMessage = "Process integers 1 and 2 together, do not separate them!";
// when
assistant.chat(userMessage);
// then
verify(integerListProcessor).processIntegers(asList(1, 2));
verifyNoMoreInteractions(integerListProcessor);
List<ChatMessage> messages = chatMemory.messages();
verify(chatLanguageModel).generate(
singletonList(messages.get(0)),
singletonList(IntegerListProcessor.EXPECTED_SPECIFICATION)
);
verify(chatLanguageModel).generate(
asList(messages.get(0), messages.get(1), messages.get(2)),
singletonList(IntegerListProcessor.EXPECTED_SPECIFICATION)
);
}
static class StringArrayProcessor {
static ToolSpecification EXPECTED_SPECIFICATION = ToolSpecification.builder()
.name("processStrings")
.description("Processes array of strings")
.addParameter("arg0", ARRAY, items(STRING), description("Array of strings to process"))
.build();
@Tool("Processes array of strings")
void processStrings(@P("Array of strings to process") String[] ids) {
System.out.printf("called processStrings(%s)%n", Arrays.toString(ids));
}
}
@Test
@Disabled
// TODO fix: should automatically convert List<String> into String[]
void should_use_tool_with_Array_of_Strings_parameter() {
StringArrayProcessor stringArrayProcessor = spy(new StringArrayProcessor());
ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10);
Assistant assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(chatLanguageModel)
.chatMemory(chatMemory)
.tools(stringArrayProcessor)
.build();
String userMessage = "Process strings 'cat' and 'dog' together, do not separate them!";
// when
assistant.chat(userMessage);
// then
verify(stringArrayProcessor).processStrings(new String[]{"cat", "dog"});
verifyNoMoreInteractions(stringArrayProcessor);
List<ChatMessage> messages = chatMemory.messages();
verify(chatLanguageModel).generate(
singletonList(messages.get(0)),
singletonList(StringArrayProcessor.EXPECTED_SPECIFICATION)
);
verify(chatLanguageModel).generate(
asList(messages.get(0), messages.get(1), messages.get(2)),
singletonList(StringArrayProcessor.EXPECTED_SPECIFICATION)
);
}
static class WeatherService {
static ToolSpecification EXPECTED_SPECIFICATION = ToolSpecification.builder()
.name("currentTemperature")
.description("") // TODO should be null?
.addParameter("arg0", STRING)
.addParameter("arg1", STRING, from("enum", asList("CELSIUS", "fahrenheit", "Kelvin")))
.build();
@Tool
int currentTemperature(String city, TemperatureUnit unit) {
System.out.printf("called currentTemperature(%s, %s)%n", city, unit);
return 42;
}
}
enum TemperatureUnit {
CELSIUS, fahrenheit, Kelvin
}
@Test
void should_use_tool_with_enum_parameter() {
// given
WeatherService weatherService = spy(new WeatherService());
ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10);
Assistant assistant = AiServices.builder(Assistant.class)
.chatLanguageModel(chatLanguageModel)
.chatMemory(chatMemory)
.tools(weatherService)
.build();
// when
Response<AiMessage> response = assistant.chat("What is the temperature in Munich now, in kelvin?");
// then
assertThat(response.content().text()).contains("42");
verify(weatherService).currentTemperature("Munich", Kelvin);
verifyNoMoreInteractions(weatherService);
List<ChatMessage> messages = chatMemory.messages();
verify(chatLanguageModel).generate(
singletonList(messages.get(0)),
singletonList(WeatherService.EXPECTED_SPECIFICATION)
);
verify(chatLanguageModel).generate(
asList(messages.get(0), messages.get(1), messages.get(2)),
singletonList(WeatherService.EXPECTED_SPECIFICATION)
);
}
// TODO test Lists, Sets, Arrays of different types (including enums).
}
| [
"dev.langchain4j.agent.tool.ToolSpecification.builder",
"dev.langchain4j.model.openai.OpenAiChatModel.builder"
] | [((1704, 2020), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1704, 1999), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1704, 1967), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1704, 1936), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1704, 1906), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1704, 1837), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((1704, 1784), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((2298, 2551), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((2298, 2526), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((2298, 2443), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((2298, 2361), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((5791, 6229), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5791, 6204), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5791, 6168), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5791, 6133), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5791, 6052), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5791, 6005), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5791, 5932), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((5791, 5875), 'dev.langchain4j.model.openai.OpenAiChatModel.builder'), ((14240, 14493), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((14240, 14468), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((14240, 14365), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((14240, 14307), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((16020, 16277), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((16020, 16252), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((16020, 16147), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((16020, 16088), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((17902, 18157), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((17902, 18132), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((17902, 18028), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((17902, 17969), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((19791, 20093), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((19791, 20068), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((19791, 19965), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((19791, 19895), 'dev.langchain4j.agent.tool.ToolSpecification.builder'), ((19791, 19862), 'dev.langchain4j.agent.tool.ToolSpecification.builder')] |