// project/app/config/types.ts (commit 7bd11ed, author: kabylake)
// Configuration for embeddings, including paths, models, and document settings
type EmbeddingsConfig = {
  embeddings_path: string; // Path where embeddings will be saved
  embedding_model: { // Embedding model specification (required here despite older comment saying "optional" — NOTE(review): add `?` if it is truly optional)
    model_name: string; // Name of the model
  };
  splade_config: { // Configuration for SPLADE (required here despite older comment saying "optional" — NOTE(review): add `?` if it is truly optional)
    n_batch: number; // Batch size for processing
  };
  chunk_sizes: number[]; // Chunk sizes for splitting during embedding
  document_settings: { // Settings for document processing; one entry per document collection
    doc_path: string; // Path to documents
    additional_parser_settings?: { // Optional settings for parsing documents
      md: { // Settings for Markdown documents
        skip_first: boolean; // Whether to skip the first section
        merge_sections: boolean; // Whether to merge sections
        remove_images: boolean; // Whether to remove images from documents
      };
    };
    passage_prefix: string; // Prefix for passages
  }[];
};
// Configuration for semantic search functionality.
interface SemanticSearchConfig {
  max_k: number; // Maximum number of results to return
  max_char_size: number; // Maximum character size for the context provided to models
  query_prefix: string; // Prefix prepended to queries
}
// Top-level application configuration combining all sections defined in this file.
export type AppConfig = {
  cache_folder: string; // Path to the cache folder
  embeddings: EmbeddingsConfig; // Embedding generation settings (paths, models, document processing)
  semantic_search: SemanticSearchConfig; // Semantic search settings (result limits, context size, query prefix)
};
// Keyword arguments controlling the inference behavior of an OpenAI model.
type OpenAIModelKwargs = {
  openai_api_key: string; // API key for accessing the OpenAI API
  temperature: number; // Controls randomness of generation; 0.0 produces deterministic responses
  model_name: string; // Name of the model used for generating responses
};

// Type definition for the LLM configuration section for OpenAI models
type OpenAIConfig = {
  type: 'openai'; // Discriminant tag selecting the OpenAI backend
  params: {
    prompt_template: string; // Prompt template containing placeholders for context and questions
    model_kwargs: OpenAIModelKwargs; // Model inference settings (see alias above)
  };
};
// Type definition for the LLM configuration section for Google VertexAI models
type VertexAIConfig = {
  type: 'vertexai'; // Specifies the use of a Google VertexAI model (comment previously said "OpenAI" — copy-paste error)
  params: {
    prompt_template: string; // Template for constructing prompts for the model. It includes placeholders for context and questions.
    model_kwargs: { // Keyword arguments for configuring the model's inference behavior
      model_name: string; // Specifies the name of the model to be used for generating responses.
      temperature: number; // Temperature setting for controlling the randomness of response generation. A value of 0.0 generates deterministic responses.
    };
  };
};
// LLM configuration section: a discriminated union over the literal `type` tag
// ('openai' | 'vertexai') — narrow on `type` to access backend-specific params.
export type LLMConfig = OpenAIConfig | VertexAIConfig;