date_collected (stringclasses 1) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence)
---|---|---|---|---|
2024-01-10 | SBrandeis/transformers | src~transformers~models~openai~modeling_tf_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 OpenAI GPT model."""
from dataclasses import dataclass
from typing import Optional, Tuple
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput, TFSequenceClassifierOutput
from ...modeling_tf_utils import (
TFCausalLanguageModelingLoss,
TFConv1D,
TFPreTrainedModel,
TFSequenceClassificationLoss,
TFSequenceSummary,
TFSharedEmbeddings,
get_initializer,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_openai import OpenAIGPTConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "OpenAIGPTConfig"
_TOKENIZER_FOR_DOC = "OpenAIGPTTokenizer"
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"openai-gpt",
# See all OpenAI GPT models at https://huggingface.co/models?filter=openai-gpt
]
class TFAttention(tf.keras.layers.Layer):
def __init__(self, nx, n_ctx, config, scale=False, **kwargs):
super().__init__(**kwargs)
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert (
n_state % config.n_head == 0
), f"Hidden dimension {n_state} not dividable by number of heads {config.n_head}"
self.n_ctx = n_ctx
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.output_attentions = config.output_attentions
self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name="c_attn")
self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_proj")
self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop)
self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
pass
@staticmethod
def causal_attention_mask(nd, ns, dtype):
"""
1's in the lower triangle, counting from the lower right corner. Same as tf.matrix_band_part(tf.ones([nd, ns]),
-1, ns-nd), but doesn't produce garbage on TPUs.
"""
i = tf.range(nd)[:, None]
j = tf.range(ns)
m = i >= j - ns + nd
return tf.cast(m, dtype)
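# Worked illustration of the mask above, with illustrative sizes nd=3, ns=5:
#   [[1, 1, 1, 0, 0],
#    [1, 1, 1, 1, 0],
#    [1, 1, 1, 1, 1]]
# so each destination position may only attend to source positions up to and
# including its own (a lower-triangular block counted from the lower-right corner).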
def _attn(self, q, k, v, attention_mask, head_mask, output_attentions, training=False):
# q, k, v have shape [batch, heads, sequence, features]
w = tf.matmul(q, k, transpose_b=True)
if self.scale:
dk = tf.cast(shape_list(k)[-1], tf.float32) # scale attention_scores
w = w / tf.math.sqrt(dk)
# w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.
_, _, nd, ns = shape_list(w)
b = self.causal_attention_mask(nd, ns, dtype=w.dtype)
b = tf.reshape(b, [1, 1, nd, ns])
w = w * b - 1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = tf.nn.softmax(w, axis=-1)
w = self.attn_dropout(w, training=training)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [tf.matmul(w, v)]
if output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = tf.transpose(x, [0, 2, 1, 3])
x_shape = shape_list(x)
new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]]
return tf.reshape(x, new_x_shape)
def split_heads(self, x):
x_shape = shape_list(x)
new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head]
x = tf.reshape(x, new_x_shape)
return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
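# Shape walk-through, assuming the original GPT configuration (n_embd=768, n_head=12):
#   split_heads: (batch, seq_len, 768) -> (batch, 12, seq_len, 64)
#   merge_heads: (batch, 12, seq_len, 64) -> (batch, seq_len, 768)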
def call(self, x, attention_mask, head_mask, output_attentions, training=False):
x = self.c_attn(x)
query, key, value = tf.split(x, 3, axis=2)
query = self.split_heads(query)
key = self.split_heads(key)
value = self.split_heads(value)
attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions, training=training)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a, training=training)
outputs = [a] + attn_outputs[1:]
return outputs # a, (attentions)
class TFMLP(tf.keras.layers.Layer):
def __init__(self, n_state, config, **kwargs):
super().__init__(**kwargs)
nx = config.n_embd
self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_fc")
self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name="c_proj")
self.act = get_tf_activation("gelu")
self.dropout = tf.keras.layers.Dropout(config.resid_pdrop)
def call(self, x, training=False):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
h2 = self.dropout(h2, training=training)
return h2
class TFBlock(tf.keras.layers.Layer):
def __init__(self, n_ctx, config, scale=False, **kwargs):
super().__init__(**kwargs)
nx = config.n_embd
self.attn = TFAttention(nx, n_ctx, config, scale, name="attn")
self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1")
self.mlp = TFMLP(4 * nx, config, name="mlp")
self.ln_2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_2")
def call(self, x, attention_mask, head_mask, output_attentions, training=False):
output_attn = self.attn(x, attention_mask, head_mask, output_attentions, training=training)
a = output_attn[0] # output_attn: a, (attentions)
n = self.ln_1(x + a)
m = self.mlp(n, training=training)
h = self.ln_2(n + m)
outputs = [h] + output_attn[1:]
return outputs # x, (attentions)
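# The block therefore computes n = LayerNorm(x + Attention(x)); h = LayerNorm(n + MLP(n)),
# i.e. a post-LayerNorm residual block as in the original GPT
# (GPT-2 later moved the LayerNorm in front of each sub-layer).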
@keras_serializable
class TFOpenAIGPTMainLayer(tf.keras.layers.Layer):
config_class = OpenAIGPTConfig
def __init__(self, config, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
self.config = config
self.output_hidden_states = config.output_hidden_states
self.output_attentions = config.output_attentions
self.return_dict = config.use_return_dict
self.num_hidden_layers = config.n_layer
self.vocab_size = config.vocab_size
self.n_embd = config.n_embd
self.tokens_embed = TFSharedEmbeddings(
config.vocab_size, config.n_embd, initializer_range=config.initializer_range, name="tokens_embed"
)
self.positions_embed = tf.keras.layers.Embedding(
config.n_positions,
config.n_embd,
embeddings_initializer=get_initializer(config.initializer_range),
name="positions_embed",
)
self.drop = tf.keras.layers.Dropout(config.embd_pdrop)
self.h = [TFBlock(config.n_ctx, config, scale=True, name="h_._{}".format(i)) for i in range(config.n_layer)]
def get_input_embeddings(self):
return self.tokens_embed
def set_input_embeddings(self, value):
self.tokens_embed.weight = value
self.tokens_embed.vocab_size = shape_list(value)[0]
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
raise NotImplementedError
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
inputs["input_ids"] = tf.reshape(inputs["input_ids"], [-1, input_shape[-1]])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs["position_ids"] is None:
inputs["position_ids"] = tf.range(input_shape[-1], dtype=tf.int32)[tf.newaxis, :]
if inputs["attention_mask"] is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
inputs["attention_mask"] = inputs["attention_mask"][:, tf.newaxis, tf.newaxis, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
inputs["attention_mask"] = tf.cast(inputs["attention_mask"], tf.float32)
inputs["attention_mask"] = (1.0 - inputs["attention_mask"]) * -10000.0
else:
inputs["attention_mask"] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if inputs["head_mask"] is not None:
raise NotImplementedError
else:
inputs["head_mask"] = [None] * self.num_hidden_layers
# head_mask = tf.constant([0] * self.num_hidden_layers)
inputs["position_ids"] = tf.reshape(inputs["position_ids"], [-1, shape_list(inputs["position_ids"])[-1]])
if inputs["inputs_embeds"] is None:
inputs["inputs_embeds"] = self.tokens_embed(inputs["input_ids"], mode="embedding")
position_embeds = self.positions_embed(inputs["position_ids"])
if inputs["token_type_ids"] is not None:
inputs["token_type_ids"] = tf.reshape(
inputs["token_type_ids"], [-1, shape_list(inputs["token_type_ids"])[-1]]
)
token_type_embeds = self.tokens_embed(inputs["token_type_ids"], mode="embedding")
else:
token_type_embeds = 0
hidden_states = inputs["inputs_embeds"] + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states, training=inputs["training"])
output_shape = input_shape + [shape_list(hidden_states)[-1]]
all_attentions = () if inputs["output_attentions"] else None
all_hidden_states = () if inputs["output_hidden_states"] else None
for i, block in enumerate(self.h):
if inputs["output_hidden_states"]:
all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
outputs = block(
hidden_states,
inputs["attention_mask"],
inputs["head_mask"][i],
inputs["output_attentions"],
training=inputs["training"],
)
hidden_states = outputs[0]
if inputs["output_attentions"]:
all_attentions = all_attentions + (outputs[1],)
hidden_states = tf.reshape(hidden_states, output_shape)
# Add last hidden state
if inputs["output_hidden_states"]:
all_hidden_states = all_hidden_states + (hidden_states,)
if inputs["output_attentions"]:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
if not inputs["return_dict"]:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
)
class TFOpenAIGPTPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = OpenAIGPTConfig
base_model_prefix = "transformer"
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
@dataclass
class TFOpenAIGPTDoubleHeadsModelOutput(ModelOutput):
"""
Base class for outputs of models predicting if two sentences are consecutive or not.
Args:
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mc_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices)`):
Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
logits: tf.Tensor = None
mc_logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
OPENAI_GPT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the
generic methods the library implements for all its models (such as downloading or saving, resizing the input
embeddings, pruning heads etc.)
This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use
it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage
and behavior.
.. note::
TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional argument.
This second option is useful when using the :meth:`tf.keras.Model.fit` method, which currently requires having all
the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in
the first positional argument:
- a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Parameters:
config (:class:`~transformers.OpenAIGPTConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
OPENAI_GPT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.OpenAIGPTTokenizer`. See
:func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`__
position_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`__
head_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.",
OPENAI_GPT_START_DOCSTRING,
)
class TFOpenAIGPTModel(TFOpenAIGPTPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
@add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="openai-gpt",
output_type=TFBaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.transformer(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
# Copied from transformers.models.distilbert.modeling_tf_distilbert.TFDistilBertModel.serving_output
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)
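# A minimal usage sketch, assuming the public "openai-gpt" checkpoint and the standard tokenizer API:
#   from transformers import OpenAIGPTTokenizer, TFOpenAIGPTModel
#   tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#   model = TFOpenAIGPTModel.from_pretrained("openai-gpt")
#   inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
#   outputs = model(inputs)
#   last_hidden_state = outputs.last_hidden_state  # (batch_size, seq_len, n_embd)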
@add_start_docstrings(
"""
OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input
embeddings).
""",
OPENAI_GPT_START_DOCSTRING,
)
class TFOpenAIGPTLMHeadModel(TFOpenAIGPTPreTrainedModel, TFCausalLanguageModelingLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
def get_output_embeddings(self):
return self.get_input_embeddings()
def set_output_embeddings(self, value):
self.set_input_embeddings(value)
@add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="openai-gpt",
output_type=TFCausalLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the cross entropy classification loss. Indices should be in ``[0, ...,
config.vocab_size - 1]``.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
transformer_outputs = self.transformer(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
hidden_states = transformer_outputs[0]
logits = self.transformer.tokens_embed(hidden_states, mode="linear")
loss = None
if inputs["labels"] is not None:
# shift labels to the left and cut last logit token
logits = logits[:, :-1]
labels = inputs["labels"][:, 1:]
loss = self.compute_loss(labels, logits)
if not inputs["return_dict"]:
output = (logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFCausalLMOutput(
loss=loss,
logits=logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
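# Label-shifting illustration with a toy sequence: a causal LM predicts token t+1 from
# tokens up to t, so with input_ids = labels = [t0, t1, t2, t3] the code above drops the
# last logit (logits[:, :-1]) and the first label (labels[:, 1:]), comparing predictions
# at positions [t0, t1, t2] against targets [t1, t2, t3].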
# Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.serving_output
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFCausalLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top e.g. for
RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
input embeddings; the classification head takes as input the hidden state at a specified classification token index in the
input sequence.
""",
OPENAI_GPT_START_DOCSTRING,
)
class TFOpenAIGPTDoubleHeadsModel(TFOpenAIGPTPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
config.num_labels = 1
self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
self.multiple_choice_head = TFSequenceSummary(
config, initializer_range=config.initializer_range, name="multiple_choice_head"
)
@add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFOpenAIGPTDoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
mc_token_ids=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
r"""
mc_token_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, num_choices)`, `optional`, default to index of the last token of the input):
Index of the classification token in each input sequence. Selected in the range ``[0, input_ids.size(-1) -
1]``.
Return:
Examples::
>>> import tensorflow as tf
>>> from transformers import OpenAIGPTTokenizer, TFOpenAIGPTDoubleHeadsModel
>>> tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
>>> model = TFOpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt')
>>> # Add a [CLS] to the vocabulary (we should train it also!)
>>> tokenizer.add_special_tokens({'cls_token': '[CLS]'})
>>> model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
>>> print(tokenizer.cls_token_id, len(tokenizer)) # The newly added token is the last token of the vocabulary
>>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
>>> encoding = tokenizer(choices, return_tensors="tf")
>>> inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()}
>>> inputs["mc_token_ids"]= tf.constant([inputs["input_ids"].shape[-1] - 1, inputs["input_ids"].shape[-1] - 1])[None, :] # Batch size 1
>>> outputs = model(inputs)
>>> lm_prediction_scores, mc_prediction_scores = outputs[:2]
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
mc_token_ids=mc_token_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None:
input_shapes = shape_list(inputs["input_ids"])
else:
input_shapes = shape_list(inputs["inputs_embeds"])[:-1]
seq_length = input_shapes[-1]
flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None
flat_attention_mask = (
tf.reshape(inputs["attention_mask"], (-1, seq_length)) if inputs["attention_mask"] is not None else None
)
flat_token_type_ids = (
tf.reshape(inputs["token_type_ids"], (-1, seq_length)) if inputs["token_type_ids"] is not None else None
)
flat_position_ids = (
tf.reshape(inputs["position_ids"], (-1, seq_length)) if inputs["position_ids"] is not None else None
)
transformer_outputs = self.transformer(
flat_input_ids,
flat_attention_mask,
flat_token_type_ids,
flat_position_ids,
inputs["head_mask"],
inputs["inputs_embeds"],
inputs["output_attentions"],
inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
hidden_states = transformer_outputs[0]
hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:])
lm_logits = self.transformer.tokens_embed(hidden_states, mode="linear")
mc_logits = self.multiple_choice_head(hidden_states, inputs["mc_token_ids"], training=inputs["training"])
mc_logits = tf.squeeze(mc_logits, axis=-1)
if not inputs["return_dict"]:
return (lm_logits, mc_logits) + transformer_outputs[1:]
return TFOpenAIGPTDoubleHeadsModelOutput(
logits=lm_logits,
mc_logits=mc_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
"mc_token_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFOpenAIGPTDoubleHeadsModelOutput(
logits=output.logits, mc_logits=output.mc_logits, hidden_states=hs, attentions=attns
)
@add_start_docstrings(
"""
The OpenAI GPT Model transformer with a sequence classification head on top (linear layer).
:class:`~transformers.TFOpenAIGPTForSequenceClassification` uses the last token in order to do the classification,
as other causal models (e.g. GPT-2) do.
Since it does classification on the last token, it requires to know the position of the last token. If a
:obj:`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each
row. If no :obj:`pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot
guess the padding tokens when :obj:`inputs_embeds` are passed instead of :obj:`input_ids`, it does the same (take
the last value in each row of the batch).
""",
OPENAI_GPT_START_DOCSTRING,
)
class TFOpenAIGPTForSequenceClassification(TFOpenAIGPTPreTrainedModel, TFSequenceClassificationLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.score = tf.keras.layers.Dense(
config.num_labels,
kernel_initializer=get_initializer(config.initializer_range),
name="score",
use_bias=False,
)
self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
@add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="openai-gpt",
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the sequence classification loss. Indices should be in ``[0, ...,
config.num_labels - 1]``.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
transformer_outputs = self.transformer(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
logits_shape = shape_list(logits)
in_logits = None
if self.config.pad_token_id is None:
sequence_lengths = -1
else:
if inputs["input_ids"] is not None:
sequence_lengths = (
tf.reduce_sum(
tf.cast(tf.math.not_equal(inputs["input_ids"], self.config.pad_token_id), tf.int32),
-1,
keepdims=False,
)
- 1
)
def get_seq_element(sequence_position, input_batch):
return tf.strided_slice(
input_batch, [sequence_position, 0], [sequence_position + 1, input_batch.shape[-1]], [1, 1]
)
result = tf.map_fn(
fn=lambda t: get_seq_element(t[0], t[1]), elems=[sequence_lengths, logits], dtype="float"
)
in_logits = tf.reshape(result, [logits_shape[0], logits_shape[-1]])
else:
sequence_lengths = -1
logger.warning(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
f"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
loss = None
if inputs["labels"] is not None:
if input_ids is not None:
batch_size, sequence_length = shape_list(inputs["input_ids"])[:2]
else:
batch_size, sequence_length = shape_list(inputs["inputs_embeds"])[:2]
assert (
self.config.pad_token_id is not None or batch_size == 1
), "Cannot handle batch sizes > 1 if no padding token is defined."
if not tf.is_tensor(sequence_lengths):
in_logits = logits[0:batch_size, sequence_lengths]
loss = self.compute_loss(
tf.reshape(inputs["labels"], [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels])
)
pooled_logits = in_logits if in_logits is not None else logits
if not inputs["return_dict"]:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=pooled_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
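# Last-token selection, illustrated with a toy batch (values are illustrative): with
# pad_token_id set and input_ids = [[12, 55, 87, pad, pad]], the reduce_sum above counts
# 3 non-padding tokens, so sequence_lengths = 2 and the logits at position 2
# (the last real token) are the ones used for classification.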
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
| [] |
2024-01-10 | Lucete28/TradeTrend | TT_runfile~update_naver_raw.py | from airflow.models.variable import Variable
import openai
import pandas as pd
openai.api_key = Variable.get("gpt_api_key")
Target_list = Variable.get("Target_list")
values = [tuple(item.strip("()").split(",")) for item in Target_list.split("),")]
values = [(x[0].strip(), x[1].strip()) for x in values]
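# Parsing sketch (the ticker/name pairs are illustrative, not from the project):
# an Airflow Variable such as Target_list = "(035420, naver),(005930, samsung)"
# is turned by the two lines above into [('035420', 'naver'), ('005930', 'samsung')].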
err_report = []
for val in values:
gpt_ans = []
temp_df = pd.read_csv(f'/home/jhy/code/TradeTrend/data/{val[0]}/{val[0]}_temp4.csv')
raw_df = pd.read_csv(f'/home/jhy/code/TradeTrend/data/{val[0]}/{val[0]}_news_raw2.csv')
ans_list = raw_df.iloc[:, 1]
while True:
condition_satisfied = True # Flag that tracks whether every row already satisfies the condition
for i, ans in enumerate(ans_list):
try:
if len(str(ans)) > 4 or (float(ans) > 1 or float(ans) < 0):
messages = []
a = temp_df.iloc[i, 1]
content = f'{a} {val[1]} ๊ด๋ จ ๋ด์ค๊ธฐ์ฌ ์ ๋ชฉ์ธ๋ฐ {val[1]} ์ฃผ์์ ๋ฏธ์น ๊ธ์ ๋์ ํ๊ท ์ 0์์ 1์ฌ์ด ์์ซ์ ๋์๋ฆฌ๊น์ง ๋ํ๋ด float๊ฐ๋ง'
messages.append({"role": "user", "content": content})
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
chat_response = completion.choices[0].message.content
gpt_ans.append(chat_response)
messages.append({"role": "assistant", "content": chat_response})
# Update the corresponding value in raw_df with the new one.
raw_df.iloc[i, 1] = chat_response
raw_df.to_csv(f'/home/jhy/code/TradeTrend/data/{val[0]}/{val[0]}_news_raw2.csv', index=False)
condition_satisfied = False # Mark that at least one row did not satisfy the condition
except: # An error occurred
print(i, ans)
err_report.append(ans)
condition_satisfied = False
if condition_satisfied:
break # Exit the loop once every row satisfies the condition.
for err in err_report:
if err_report.count(err) >=5:
print("5ํ ์ด์ ๊ฐ์ err ๋ฐ์")
break
| [
"PLACEHOLDER PLACEHOLDER ๊ด๋ จ ๋ด์ค๊ธฐ์ฌ ์ ๋ชฉ์ธ๋ฐ PLACEHOLDER ์ฃผ์์ ๋ฏธ์น ๊ธ์ ๋์ ํ๊ท ์ 0์์ 1์ฌ์ด ์์ซ์ ๋์๋ฆฌ๊น์ง ๋ํ๋ด float๊ฐ๋ง"
] |
2024-01-10 | LilithHafner/ai | integrated_ai.py | import openai
openai.api_key = "sk-..."
# GPT AI
def ai(prompt):
response = openai.Completion.create(
engine="code-davinci-002",
prompt=prompt,
temperature=0,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop="<end>"
)
return response.choices[0].text
# Subprocesses
def user(prompt):
return input(prompt+"\n*>> ")
import traceback
def python_eval(prompt):
try:
return str(eval(prompt, globals()))
except:
return traceback.format_exc()
def python_exec(prompt):
try:
return str(exec(prompt, globals()))
except:
return traceback.format_exc()
subprocesses = [
("<user output>", "<user input>", user),
("<python eval>", "<python eval result>", python_eval),
("<python exec>", "<python exec result>", python_exec),
]
def subprocess(s):
for start, end, func in subprocesses:
if s.startswith(start):
return end + func(s[len(start):])
# print("The AI made an unsupported query:", s, "", sep="\n")
return "<error>unknown tag"
## Training data
prompt = """This is a question and answer bot that has oracles to various external tools including python, google, and others
<user input>what time is it<end>
<pyhton eval>time.ctime()<end>
<python eval result>Traceback (most recent call last):
File "/Users/x/Documents/integrated_ai.py", line 26, in python
return str(eval(prompt, globals(), locals()))
File "<string>", line 1, in <module>
NameError: name 'time' is not defined<end>
<python exec>import time<end>
<python exec result>None<end>
<python eval>time.ctime()<end>
<user output>The time is Sun Apr 24 18:01:32 2022<end>
<user input>what is the weather in New York<end>
<google>weather in New York<end>
<google result>Sunny
53ยฐFยฐC
Precipitation: 1%
Humidity: 52%
Wind: 7 mph
New York, NY
Sunday 6:00 PM
Sunny
TemperaturePrecipitationWind<end>
<user output>The weather in New York is Sunny<end>
<user input>is it warm in chicago?<end>
<google>weather in chicago<end>
result: Cloudy
70ยฐFยฐC
Precipitation: 5%
Humidity: 65%
Wind: 19 mph
Chicago, IL
Sunday 6:00 PM
Cloudy
TemperaturePrecipitationWind<end>
<user output>It is warm in chicago<end>
<user input>is 1729 prime?<end>
<python eval>is_prime(1729)<end>
<python eval result>Traceback (most recent call last):
File "/Users/x/Documents/integrated_ai.py", line 26, in python_eval
return str(eval(prompt, globals()))
File "<string>", line 1, in <module>
NameError: name 'is_prime' is not defined<end>
<python exec>def is_prime(n):
if n <= 1:
return False
for i in range(2, n):
if n % i == 0:
return False
return True<end>
<python exec result>None<end>
<python eval>is_prime(1729)<end>
<python eval result>False<end>
<user output>1729 is not prime<end>
<user input>Stop using google<end>
<user output>Google disabled.<end>
<user input>What's the weather?<end>
<user output>I cannot answer that question without google<end>
<user input>Name 7 edibe mushrooms<end>
<user output>Pleurotus, Lentinula edodes, Shiitake mushroom, Auricularia auricula-judae, Volvariella volvacea, Flammulina velutipes, Tremella fuciformis<end>"""
# Main loop
def kernal(verbose=True):
global prompt
prompt += "<user input>" + user("Welcome!") + "<end>\n"
while True:
call = ai(prompt)
if verbose:
print(call + "<end>")
prompt += call + "<end>\n"
if call.startswith("<exit>"):
return
result = subprocess(call)
if verbose:
print(result + "<end>")
prompt += result + "<end>\n"
if __name__ == "__main__":
kernal()
| [
"This is a question and answer bot that has oracles to various external tools including python, google, and others\n\n<user input>what time is it<end>\n<pyhton eval>time.ctime()<end>\n<python eval result>Traceback (most recent call last):\n File \"/Users/x/Documents/integrated_ai.py\", line 26, in python\n return str(eval(prompt, globals(), locals()))\n File \"<string>\", line 1, in <module>\nNameError: name 'time' is not defined<end>\n<python exec>import time<end>\n<python exec result>None<end>\n<python eval>time.ctime()<end>\n<user output>The time is Sun Apr 24 18:01:32 2022<end>\n<user input>what is the weather in New York<end>\n<google>weather in New York<end>\n<google result>Sunny\n53ยฐFยฐC\nPrecipitation: 1%\nHumidity: 52%\nWind: 7 mph\nNew York, NY\nSunday 6:00 PM\nSunny\nTemperaturePrecipitationWind<end>\n<user output>The weather in New York is Sunny<end>\n<user input>is it warm in chicago?<end>\n<google>weather in chicago<end>\nresult: Cloudy\n70ยฐFยฐC\nPrecipitation: 5%\nHumidity: 65%\nWind: 19 mph\nChicago, IL\nSunday 6:00 PM\nCloudy\nTemperaturePrecipitationWind<end>\n<user output>It is warm in chicago<end>\n<user input>is 1729 prime?<end>\n<python eval>is_prime(1729)<end>\n<python eval result>Traceback (most recent call last):\n File \"/Users/x/Documents/integrated_ai.py\", line 26, in python_eval\n return str(eval(prompt, globals()))\n File \"<string>\", line 1, in <module>\nNameError: name 'is_prime' is not defined<end>\n<python exec>def is_prime(n):\n if n <= 1:\n return False\n for i in range(2, n):\n if n % i == 0:\n return False\n return True<end>\n<python exec result>None<end>\n<python eval>is_prime(1729)<end>\n<python eval result>False<end>\n<user output>1729 is not prime<end>\n<user input>Stop using google<end>\n<user output>Google disabled.<end>\n<user input>What's the weather?<end>\n<user output>I cannot answer that question without google<end>\n<user input>Name 7 edibe mushrooms<end>\n<user output>Pleurotus, Lentinula edodes, Shiitake mushroom, Auricularia auricula-judae, Volvariella volvacea, Flammulina velutipes, Tremella fuciformis<end>",
"<end>\n",
"<user input>",
"PLACEHOLDER<end>\n"
] |
2024-01-10 | Kororinpas/Lit_Tool | document_util.py | def get_split_documents(docs, chunk_size, chunk_overlap):
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size,chunk_overlap=chunk_overlap)
return text_splitter.split_documents(docs) | [] |
2024-01-10 | Kororinpas/Lit_Tool | literature_test.py | import streamlit as st
import sys
class StreamlitWriter:
def write(self, text):
st.write(text.strip())
### This the function about streamlit
def Vector_Databse():
st.write("Vector Database")
choose = st.radio("Choose using an existing database or upload a new one.",
["Using an existing one", "Uploading a new one"])
import torch
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if choose == "Using an existing one":
persist_dirctory = st.text_input("Enter the persist_dirctory")
collection = st.text_input("Enter the collection")
if st.button('Confirm'):
st.session_state['persist_dirctory'] = persist_dirctory
st.session_state['collection'] = collection
vectorstore,embeddings = load_vectorstore(persist_directory=st.session_state['persist_dirctory'],
collection_name = st.session_state['collection'],
model_name = 'sentence-transformers/all-mpnet-base-v2',
device = device)
st.session_state['vectorstore'] = vectorstore
st.session_state['embeddings'] = embeddings
print('The vectorstore load successfully')
else:
path = st.text_input("Enter the path")
persist_dirctory = st.text_input("Enter the persist_dirctory")
collection = st.text_input("Enter the collection")
if st.button('Confirm'):
st.session_state['path'] = path
st.session_state['persist_dirctory'] = persist_dirctory
st.session_state['collection'] = collection
split_docs = load_pdf(path = st.session_state['path'],
openai_api_key=st.session_state['openai_api_key'],
chunk_size=st.session_state['chunk_size'],
chunk_overlap=st.session_state['chunk_overlap'])
vectorstore,embeddings = generate_vectorstore(split_docs = split_docs,
model_name = 'sentence-transformers/all-mpnet-base-v2',
persist_directory = st.session_state['persist_dirctory'],
collection_name = st.session_state['collection'],
device=device)
st.session_state['vectorstore'] = vectorstore
st.session_state['embeddings'] =embeddings
print('The vectorstore load successfully')
def Parameters():
import os
openai_api_key = st.text_input('Enter your Openapi_api_key')
if st.button('Confirm'):
if openai_api_key == '':
st.session_state['openai_api_key'] = os.environ.get('openai_api_key')
else:
st.session_state['openai_api_key'] = openai_api_key
chunk_size = st.text_input('Enter your chunk_size')
if st.button('Confirm_1'):
if chunk_size== '':
st.session_state['chunk_size'] = 1500
chunk_overlap = st.text_input('Enter your chunk_overlap')
if st.button('Confirm_2'):
if chunk_overlap == '':
st.session_state['chunk_overlap'] = 0
def Docs():
col1,col2 = st.columns([1,1])
with col1:
output_text = ''
vectorstore = st.session_state['vectorstore']
edited_output_text = st.text_area("Output text", value=output_text, height=600)
if st.button("Confirm paragraph"):
output_text = edited_output_text
k = st.slider("Select the number of sentences to generate", min_value=1, max_value=5, value=1)
query = st.text_input("Input the query")
if st.button("Confirm query"):
output, docs = get_chain_output(query=query,
vectordb=vectorstore,
k=k,
openai_api_key=st.session_state['openai_api_key'])
final_json = run_text_match(output=output,
query=query,
docs=docs,
k=k,
embeddings=st.session_state['embeddings'])
st.session_state['final_json'] = final_json
with col2:
if 'final_json' in st.session_state:
final_json = st.session_state['final_json']
selected_sentence = st.selectbox("Select a sentence", final_json)
if st.button('Confirm sentence'):
process_selected_sentence(selected_sentence)
###This is the function about Langchain
###Loading PDF part
def load_pdf(path, openai_api_key, chunk_size, chunk_overlap):
from langchain.document_loaders import PyMuPDFLoader, DirectoryLoader, UnstructuredPDFLoader
#from detectron2.config import get_cfg
from PyPDF2 import PdfReader
#cfg = get_cfg()
#cfg.MODEL.DEVICE = 'gpu'
import os
file_names = os.listdir(path)
pdf_file_names = [path + '/' + file_name for file_name in file_names if file_name.endswith('.pdf')]
docs = []
import re
for pdf in pdf_file_names:
source = extract_doi(pdf)
if source != 'None':
doc = PyMuPDFLoader(pdf).load()
for element in doc:
element.metadata = source
element.page_content = re.sub('\n+', ' ', element.page_content.strip())
docs.append(element)
else:
doc = PyMuPDFLoader(pdf).load()
print(f"{pdf} is not identified! Using other strategy!!")
source = extract_doi_llm(doc, openai_api_key)
if source != 'None':
for element in doc:
element.metadata = source
for element in doc:
element.page_content = re.sub('\n+', ' ', element.page_content.strip())
docs.append(element)
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
split_docs = text_splitter.split_documents(docs)
return split_docs
def get_info(path):
from PyPDF2 import PdfReader
with open(path, 'rb') as f:
pdf = PdfReader(f)
info = pdf.metadata
return info
def extract_doi(path):
source = 0
info = get_info(path)
if '/doi' in info:
doi = info['/doi']
elif '/Subject' in info:
Subject = info['/Subject']
if 'doi:' in Subject:
Subject = Subject.split('doi:')
doi = Subject[1]
else:
source = 'None'
elif '/WPS-ARTICLEDOI' in info:
doi = info['/WPS-ARTICLEDOI']
else:
source = 'None'
if source != 'None':
import habanero
import time
citation = habanero.cn.content_negotiation(ids=doi, format='bibentry')
time.sleep(5)
import bibtexparser
citation = bibtexparser.loads(citation)
citation = citation.entries[0]
source = {'author': citation['author'],
'year': citation['year'],
'title': citation['title'],
'journal': citation['journal'],
}
return source
def extract_doi_llm(doc,openai_api_key):
import re
doc[0].page_content = re.sub('\n+',' ',doc[0].page_content.strip())
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500,chunk_overlap = 50)
split_docs = text_splitter.split_documents(doc)
abstract = split_docs[0]
doi = extract_chain(abstract,openai_api_key)
if doi != 'None' and doi!= None:
import habanero
import time
citation = habanero.cn.content_negotiation(ids = doi,format='bibentry')
time.sleep(5)
import bibtexparser
citation = bibtexparser.loads(citation)
citation = citation.entries[0]
source = {'author':citation['author'],
'year':citation['year'],
'title':citation['title'],
'journal':citation['journal'],
}
return source
else:
source = 'None'
return source
def extract_chain(abstract, openai_api_key):
from kor.extraction import create_extraction_chain
from kor.nodes import Object, Text, Number
from langchain.chat_models import ChatOpenAI
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
openai_api_key=openai_api_key,
temperature=0,
)
schema = Object(
id="doi",
description="doi is a digital identifier.It typically starts with 10. followed by a numeric prefix, such as 10.1000/182.",
attributes=[
Text(
id="doi",
description='doi is a digital identifier. It typically starts with "10." followed by a numeric prefix, such as 10.1000/182.',
examples=[
(
'American Economic Journal: Economic Policy 2015, 7(4): 223โ242 http://dx.doi.org/10.1257/pol.20130367 223 Water Pollution Progress at Borders: The',
'http://dx.doi.org/10.1257/pol.20130367'),
(
'Environment and Development Economics (2020), 1โ17 doi:10.1017/S1355770X2000025X EDE RESEARCH ARTICLE Political incentives, Party Congress, and pollution cycle: empirical evidence from China Zhihua Tian,1 and Yanfang Tian2* 1School of Economics, Zhejiang University of Technology, Hangzhou',
'10.1017/S1355770X2000025X')
],
many=True
)
],
many=False
)
chain = create_extraction_chain(llm, schema, encoder_or_encoder_class='json')
output = chain.predict_and_parse(text=abstract.page_content)
if 'doi' not in output['data']:
print(f"LLM strategy failed!!{abstract.metadata['source']} Please manually add it!!")
source = 'None'
return source
else:
if output['data']['doi']['doi'] == []:
print(f"LLM strategy failed!!{abstract.metadata['source']} Please manually add it!!")
source = 'None'
return source
else:
doi = output['data']['doi']['doi'][0]
if 'doi=' in doi:
doi = doi.split('doi=')[1]
return doi
###Loading the database
def generate_vectorstore(split_docs, device, model_name, persist_directory, collection_name):
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
model_kwargs = {'device': device}
model_name = model_name
embeddings = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)
persist_directory = persist_directory
collection_name = collection_name
vectorstore = Chroma.from_documents(split_docs, embeddings, collection_name=collection_name,
persist_directory=persist_directory)
vectorstore.persist()
return vectorstore,embeddings
def load_vectorstore(persist_directory,device,model_name,collection_name):
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
model_kwargs = {'device': device}
model_name = model_name
embeddings = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)
vectordb = Chroma(collection_name=collection_name,
persist_directory=persist_directory,
embedding_function=embeddings)
return vectordb,embeddings
###Using Langchain and match
def get_chain_output(query, vectordb, k, openai_api_key):
docs = vectordb.similarity_search(query, 6, include_metadata=True)
from langchain.chat_models import ChatOpenAI
llm = ChatOpenAI(openai_api_key=openai_api_key, temperature=0, model_name="gpt-3.5-turbo")
from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.llms import OpenAI
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field, validator
from typing import List, Union, Optional
class Sentence(BaseModel):
sentence: List[str] = Field(
description="The sentence in the given document which is the most similar to the query provided")
source: List[str] = Field(description="The meta source of the paper")
score: List[float] = Field(
description="The similarity score between the sentence selected and the query provided")
parser = PydanticOutputParser(pydantic_object=Sentence)
dic = {'1':"one",
"2":"two",
"3":"three",
"4":"four",
"5":"five"}
k = dic[str(k)]
question_template = f"""
Given the document and query, find {k} sentences in the document that are most similar in meaning to the query.
Return the sentences, the meta source of the sentences and the cosine similarity scores.
If no similar sentences is found, return the sentence with highest cosine siliarity scores.
"""
main_template = """
{query}
===========
{context}
===========
{format_instructions}
"""
question_template = question_template+main_template
from langchain.chains.question_answering import load_qa_chain
from langchain import LLMChain
PROMPT = PromptTemplate(template=question_template,
input_variables=['query', 'context'],
partial_variables={"format_instructions": parser.get_format_instructions()})
llm_chain = LLMChain(llm=llm, prompt=PROMPT)
output = llm_chain({"query": query, "context": docs})
return output, docs
def run_text_match(output, k,query, docs,embeddings):
import re
text = re.sub("\n+", "", output['text'])
import json
json_obj = json.loads(text)
if "properties" in json_obj:
print('No result was found, Using embedding searching strategy!!!')
split_docs = split_for_embedding(docs)
similar_sentence = search_cosine_similarity(query,k,split_docs, embeddings)
return similar_sentence
else:
json_obj = [{'sentence': json_obj['sentence'][i],
'source': json_obj['source'][i],
'score': json_obj['score'][i]} for i in range(k)]
return json_obj
def split_for_embedding(docs): ## input: docs (list); output: sentence-level chunks ready for embedding (list)
for_embedding = []
for content in docs:
new_content = content.page_content.replace('et al.', 'et al。') # shield "et al." before splitting on '.'
new_content = new_content.split('.')
if 'source' in content.metadata:
meta_data = content.metadata['source']
else:
meta_data = content.metadata
for split_content in new_content:
split_content = split_content.replace('。', '.') # restore the shielded "et al."
if len(split_content) < 30:
continue
else:
for_embedding.append({"content": split_content, "source": meta_data})
return for_embedding
def search_cosine_similarity(query, k,split_docs, embeddings): ##query-str,split_docs-list,embeddings-embeddings()
split_docs_content = [content['content'] for content in split_docs]
split_docs_content = list(set(split_docs_content))
embed_docs = embeddings.embed_documents(split_docs_content)
embed_query = embeddings.embed_query(query)
from openai.embeddings_utils import cosine_similarity
cos_index = []
for embed_doc in embed_docs:
cos_index.append(cosine_similarity(embed_doc, embed_query))
# Build an index of sentence positions ordered by similarity
idx = sorted(range(len(cos_index)), key=lambda k: cos_index[k]) # sort positions by ascending cosine similarity
final_similar_list = []
for index in idx[-k:]:
unit = {}
unit['sentences'] = split_docs_content[index]
unit['source'] = split_docs[index]['source']
unit['score'] = cos_index[index]
final_similar_list.append(unit)
return final_similar_list
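# Return-shape sketch (scores are illustrative): with k=2 the function returns something like
#   [{'sentences': '...', 'source': {...}, 'score': 0.78},
#    {'sentences': '...', 'source': {...}, 'score': 0.83}]
# ordered from lower to higher similarity, because idx[-k:] keeps the top-k indices
# of the ascending sort.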
def main():
st.title("Literature Review Tool")
sys.stdout = StreamlitWriter()
# Create a toggle button to switch between pages
page = st.sidebar.radio("Choose a page", [ "Parameter","Vector Database","Docs"])
if page == "Parameter":
Parameters()
elif page == "Vector Database":
Vector_Databse()
elif page == "Docs":
Docs()
def my_function(input_text):
# Add your processing logic here
output_text = input_text.upper()
return output_text
def process_selected_sentence(selected_sentence):
# Display the sentence selected by the user in the final output area
st.write(f"You selected: {selected_sentence}")
main() | [
"\n Given the document and query, find PLACEHOLDER sentences in the document that are most similar in meaning to the query. \n Return the sentences, the meta source of the sentences and the cosine similarity scores. \n If no similar sentences is found, return the sentence with highest cosine siliarity scores.\n ",
"format_instructions",
"PLACEHOLDERPLACEHOLDER",
"\n {query}\n ===========\n {context}\n ===========\n {format_instructions}\n\n ",
"context"
] |
2024-01-10 | Kororinpas/Lit_Tool | pdf_retrieval.py | from operator import itemgetter
from langchain.chat_models import ChatOpenAI
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.document_loaders import DataFrameLoader, PyMuPDFLoader
import os
import fitz
import pandas as pd
import json
import ast
def fonts(doc, granularity=False, pages=2):
"""Extracts fonts and their usage in PDF documents.
:param doc: PDF document to iterate through
:type doc: <class 'fitz.fitz.Document'>
:param granularity: also use 'font', 'flags' and 'color' to discriminate text
:type granularity: bool
:rtype: [(font_size, count), (font_size, count}], dict
:return: most used fonts sorted by count, font style information
"""
styles = {}
font_counts = {}
pageCounter = 0
for page in doc:
blocks = page.get_text("dict")["blocks"]
for b in blocks: # iterate through the text blocks
if b['type'] == 0: # block contains text
for l in b["lines"]: # iterate through the text lines
for s in l["spans"]: # iterate through the text spans
if granularity:
identifier = "{0}_{1}_{2}_{3}".format(s['size'], s['flags'], s['font'], s['color'])
styles[identifier] = {'size': s['size'], 'flags': s['flags'], 'font': s['font'],
'color': s['color']}
else:
identifier = "{0}".format(s['size'])
styles[identifier] = {'size': s['size'], 'font': s['font']}
font_counts[identifier] = font_counts.get(identifier, 0) + 1 # count the fonts usage
pageCounter += 1
if pageCounter >= pages:
break
font_counts = sorted(font_counts.items(), key=itemgetter(1), reverse=True)
if len(font_counts) < 1:
raise ValueError("Zero discriminating fonts found!")
return font_counts, styles
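# Hypothetical usage sketch (the PDF path is an assumption for illustration):
# fonts() returns (font_counts, styles); font_counts is a list of (font_size_as_str, occurrence_count)
# tuples sorted by count, e.g. [('9.96', 120), ('14.0', 3)], so font_counts[0][0] is the body-text size.
# doc = fitz.open("./data/docs/sample_article.pdf")
# font_counts, styles = fonts(doc, granularity=False, pages=2)
# body_text_size = font_counts[0][0]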
def font_tags(font_counts, styles):
"""Returns dictionary with font sizes as keys and tags as value.
    :param font_counts: (font_size, count) for all fonts occurring in document
:type font_counts: list
:param styles: all styles found in the document
:type styles: dict
:rtype: dict
:return: all element tags based on font-sizes
"""
p_style = styles[font_counts[0][0]] # get style for most used font by count (paragraph)
p_size = p_style['size'] # get the paragraph's size
# sorting the font sizes high to low, so that we can append the right integer to each tag
font_sizes = []
for (font_size, count) in font_counts:
font_sizes.append(float(font_size))
font_sizes.sort(reverse=True)
# aggregating the tags for each font size
idx = 0
size_tag = {}
for size in font_sizes:
idx += 1
if size == p_size:
idx = 0
size_tag[size] = '<p>'
if size > p_size:
size_tag[size] = '<h{0}>'.format(idx)
elif size < p_size:
size_tag[size] = '<s{0}>'.format(idx)
return size_tag
def get_pdf_raw_pages(doc, pages):
header_para = []
pageCounter = 0
for page in doc:
blocks = page.get_text("dict")["blocks"]
header_para.append(blocks)
pageCounter += 1
if pageCounter >= pages:
break
return header_para
def headers_para(doc, size_tag, pages=2):
"""Scrapes headers & paragraphs from PDF and return texts with element tags.
:param doc: PDF document to iterate through
:type doc: <class 'fitz.fitz.Document'>
:param size_tag: textual element tags for each size
:type size_tag: dict
:rtype: list
    :return: texts with prepended element tags
"""
header_para = [] # list with headers and paragraphs
first = True # boolean operator for first header
previous_s = {} # previous span
pageCounter = 0
for page in doc:
blocks = page.get_text("dict")["blocks"]
for b in blocks: # iterate through the text blocks
# header_para.append("<section_block>")
if b['type'] == 0: # this block contains text
# REMEMBER: multiple fonts and sizes are possible IN one block
block_string = "" # text found in block
for l in b["lines"]: # iterate through the text lines
for s in l["spans"]: # iterate through the text spans
if s['text'].strip(): # removing whitespaces:
if first:
previous_s = s
first = False
block_string = size_tag[s['size']] + s['text']
else:
if s['size'] == previous_s['size']:
if block_string and all((c == "|") for c in block_string):
# block_string only contains pipes
block_string = size_tag[s['size']] + s['text']
if block_string == "":
# new block has started, so append size tag
block_string = size_tag[s['size']] + s['text']
else: # in the same block, so concatenate strings
block_string += " " + s['text']
else:
header_para.append(block_string)
block_string = size_tag[s['size']] + s['text']
previous_s = s
# new block started, indicating with a pipe
block_string += "|"
# header_para.append("<text_block>")
header_para.append(block_string)
# header_para.append("<text_block_end>")
# header_para.append("<section_block_end>")
pageCounter += 1
if pageCounter >= pages:
break
return header_para
def get_pdf_first_page_txt(pdf_path, pages=2):
doc = fitz.open(pdf_path)
font_counts, styles = fonts(doc, granularity=False, pages=pages)
size_tag = font_tags(font_counts, styles)
return headers_para(doc, size_tag, pages)
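# Hypothetical usage sketch (the path is an assumption): get_pdf_first_page_txt() chains
# fonts() -> font_tags() -> headers_para(), returning strings whose size tags (<h1>, <p>, <s1>, ...)
# distinguish headers from body text.
# tagged_lines = get_pdf_first_page_txt("./data/docs/sample_article.pdf", pages=2)
# headers = [line for line in tagged_lines if line.startswith("<h")]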
def get_pdf_pages(pdf_path, pages=2):
docs = PyMuPDFLoader(pdf_path).load()
return docs[:pages]
# texts = []
# for doc in docs[:pages]:
# texts.append(doc.page_content)
# return texts
def get_pdf_page_metadata(pdf_path, pages):
pdf_first_page_txt = get_pdf_first_page_txt(pdf_path, pages)
template = """
I have extracted text from the initial pages of a Journal of Economic Literature (JEL) PDF file. I require assistance in extracting
specific details, namely: article title, author, abstract and keywords section. Please be aware that if you encounter
JEL classifications such as C12 and P34, kindly ignore them and refrain from including them in the abstract and keywords.
{format_instructions}
Wrap your final output as a json objects
INPUT:
{pdf_first_page_txt}
YOUR RESPONSE:
"""
response_schemas = [
ResponseSchema(name="title", description="extracted title"),
ResponseSchema(name="author", description="extracted authors seperated by comma"),
ResponseSchema(name="abstract", description="extracted abstract"),
ResponseSchema(name="keywords", description="extracted keywords")
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
prompt = ChatPromptTemplate(
messages=[
HumanMessagePromptTemplate.from_template(template)
],
input_variables=["pdf_first_page_txt"],
partial_variables={"format_instructions": output_parser.get_format_instructions()}
)
llm = ChatOpenAI(model_name='gpt-3.5-turbo-16k',temperature=0.0,max_tokens=6048) # type: ignore gpt-3.5-turbo
final_prompt = prompt.format_prompt(pdf_first_page_txt=pdf_first_page_txt)
output = llm(final_prompt.to_messages())
try:
result = output_parser.parse(output.content)
    except Exception:
if "```json" in output.content:
json_string = output.content.split("```json")[1].strip()
else:
json_string = output.content
result = fix_JSON(json_string)
head, tail = os.path.split(pdf_path)
result["filename"] = tail
return result
def get_pdf_page_accept_metadata(pdf_path, pages):
pdf_first_page_txt = get_pdf_first_page_txt(pdf_path, pages)
template = """
I have extracted text from the initial pages of a Journal of Economic Literature (JEL) PDF file.
I need help identifying the accepted date of the article. If the accepted date is not explicitly specified,
it should be located either at the top or bottom of the first or second page of the article in a date format without the prefix 'accepted'.
{format_instructions}
Wrap your final output as a json objects
INPUT:
{pdf_first_page_txt}
YOUR RESPONSE:
"""
response_schemas = [
ResponseSchema(name="accepted", description="extracted accepted date")
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
prompt = ChatPromptTemplate(
messages=[
HumanMessagePromptTemplate.from_template(template)
],
input_variables=["pdf_first_page_txt"],
partial_variables={"format_instructions": output_parser.get_format_instructions()}
)
llm = ChatOpenAI(model_name='gpt-3.5-turbo',temperature=0.0,max_tokens=148) # type: ignore gpt-3.5-turbo
final_prompt = prompt.format_prompt(pdf_first_page_txt=pdf_first_page_txt)
output = llm(final_prompt.to_messages())
try:
result = output_parser.parse(output.content)
    except Exception:
if "```json" in output.content:
json_string = output.content.split("```json")[1].strip()
else:
json_string = output.content
result = fix_JSON(json_string)
head, tail = os.path.split(pdf_path)
result["filename"] = tail
return result
def get_pdf_intro(pdf_path, pages):
pdf_first_page_txt = get_pdf_first_page_txt(pdf_path, pages)
template = """
I have extracted text from the initial pages of a Journal of Economic Literature (JEL) PDF file. I require assistance in extracting
introduction section. Typically, the introduction section begins after the abstract and ends before the next sub-title or section heading.
Wrap your final output as a json objects
INPUT:
{pdf_first_page_txt}
YOUR RESPONSE:
"""
response_schemas = [
ResponseSchema(name="introduction", description="extracted introduction")
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
prompt = ChatPromptTemplate(
messages=[
HumanMessagePromptTemplate.from_template(template)
],
input_variables=["pdf_first_page_txt"],
# partial_variables={"format_instructions": output_parser.get_format_instructions()}
)
llm = ChatOpenAI(model_name='gpt-3.5-turbo-16k',temperature=0.0,max_tokens=8396) # type: ignore gpt-3.5-turbo
final_prompt = prompt.format_prompt(pdf_first_page_txt=pdf_first_page_txt)
output = llm(final_prompt.to_messages())
try:
result = output_parser.parse(output.content)
except Exception as e:
print(str(e))
if "```json" in output.content:
json_string = output.content.split("```json")[1].strip()
else:
json_string = output.content
result = fix_JSON(json_string)
head, tail = os.path.split(pdf_path)
result["filename"] = tail
return result
def get_polish_intro(my_intro, sample_introes, words_limit, temperature):
template = """
I require an introduction for my Journal of Economic Literature and I would appreciate it \
if you could compose it for me around {words_limit} words. I would like the introduction mimic on the \
sample introductions that I will provide. If I have already provided my own introduction, \
please refine it accordingly.
% My own introduction: {my_intro}
% Sample introductions:
{sample_introes}
% End of sample introductions:
YOUR RESPONSE:
"""
response_schemas = [
ResponseSchema(name="introduction", description="refined introduction")
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
prompt = ChatPromptTemplate(
messages=[
HumanMessagePromptTemplate.from_template(template)
],
input_variables=["my_intro","sample_introes","words_limit"],
partial_variables={"format_instructions": output_parser.get_format_instructions()}
)
llm = ChatOpenAI(model_name='gpt-3.5-turbo',temperature=temperature,max_tokens=2048) # type: ignore gpt-3.5-turbo
final_prompt = prompt.format_prompt(my_intro=my_intro, sample_introes=sample_introes, words_limit=words_limit)
output = llm(final_prompt.to_messages())
result = output.content
return result
def fix_JSON(json_message=None):
result = None
try:
result = json.loads(json_message)
except Exception as e:
# Find the offending character index:
idx_to_replace = int(str(e).split(' ')[-1].replace(')', ''))
# Remove the offending character:
json_message = list(json_message)
json_message[idx_to_replace] = ' '
new_message = ''.join(json_message)
return fix_JSON(json_message=new_message)
return result
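# Hypothetical usage sketch (the string below is invented): fix_JSON() blanks out the character
# position reported by json.loads() and retries recursively until the string parses.
# broken = '{"introduction": "Voting matters\x00"}'
# parsed = fix_JSON(broken)  # -> {'introduction': 'Voting matters '}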
def save_pdfs_to_db(pdf_files, excel_file, meta_type='meta', pages=2):
if os.path.exists(excel_file):
df = pd.read_excel(excel_file)
existing_data = df.to_dict(orient='records')
else:
existing_data = []
existing_filenames = set(row['filename'] for row in existing_data)
for doc in pdf_files:
head, tail = os.path.split(doc)
if tail not in existing_filenames:
# print('get meta from LLM '+doc)
try:
if meta_type == 'intro':
metadata = get_pdf_intro2(doc, pages)
elif meta_type == 'date':
metadata = get_pdf_page_accept_metadata(doc, pages)
else:
metadata = get_pdf_page_metadata(doc, pages)
temp_data = []
temp_data.append(metadata)
save_to_excel(existing_data+temp_data, excel_file)
existing_data += temp_data
print("Data append to ", excel_file)
except Exception as e:
print(str(e))
def get_metadata_from_db(excel_file):
df = pd.read_excel(excel_file)
    records = df.to_dict(orient='records')
    return records
def get_column_from_db(excel_file, column):
df = pd.read_excel(excel_file)
doc = DataFrameLoader(df, column).load()
return doc
def get_data_from_csv(file_path, column_name, filter_value):
data = pd.read_csv(file_path, encoding = 'unicode_escape')
filtered_data = data[data[column_name] == filter_value]
dict_data = filtered_data.to_dict(orient='records') #filtered_data.values.tolist()
for row in dict_data:
md = ast.literal_eval(row["metadata"])
# print(type(md))
row["date"] = md["modDate"]
return dict_data
def get_filename_list(similar_dict, path):
filenames = []
for doc in similar_dict['context']:
filenames.append(os.path.join(path, doc.metadata['filename']))
return filenames
def save_to_excel(data, file_path):
df = pd.DataFrame(data)
df.to_excel(file_path, index=False)
def get_pdf_intro2(pdf_path, pages):
pdf_first_page_txt = get_pdf_first_page_txt(pdf_path, pages)
# pdf_first_page_txt = get_pdf_pages(pdf_path, pages)
human_template = """
I have extracted the text from the initial pages of a Journal of Economic Literature (JEL) PDF file. I require assistance in extracting the introduction section. Typically, the document follows a pattern where the 'abstract' header is encountered, followed by the abstract section. Subsequently, an 'Introduction' header is expected, which is followed by the introduction section. Next, there may be a 'Background' header or other headers indicating different sections. The introduction section generally concludes before the next sub-title or section heading appears, such as 'Background' or other similar headings.
Please continue searching for the introduction section until you reach a clear next sub-title or section heading. However, please note that if you encounter a bottom part between two pages, such as a section starting with 'RECEIVED:' followed by a date, it does not necessarily mean that the introduction section has ended. In such cases, you should continue searching on the next page.
If the text 'www.elsevier.com' appears in the beginning, it indicates that the literature is published on Elsevier and follows a specific format. In this case, the abstract section will start with "A B S T R A C T" and end before the introduction section. The introduction section will typically start with "1. Introduction" and end before the next section header, such as "2. Background". Please continue searching for the introduction section until you reach next section heading such as "2. Background", it has to be started with "2.".
Please provide the introduction section as the final output in JSON format with the key 'Introduction' written in Pascal case.
Exclude the content of the abstract section.
Only include the text within the introduction section and exclude any text prior to it.
INPUT: {pdf_first_page_txt}
YOUR RESPONSE:
"""
response_schemas = [
# ResponseSchema(name="abstract", description="extracted abstract"),
ResponseSchema(name="introduction", description="extracted introduction")
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
prompt = ChatPromptTemplate(
messages=[
HumanMessagePromptTemplate.from_template(human_template)
],
input_variables=["pdf_first_page_txt"]
)
llm = ChatOpenAI(model_name='gpt-3.5-turbo-16k',temperature=0.0,max_tokens=6658) # type: ignore gpt-3.5-turbo
final_prompt = prompt.format_prompt(pdf_first_page_txt=pdf_first_page_txt)
output = llm(final_prompt.to_messages())
try:
result = output_parser.parse(output.content)
except Exception as e:
print(str(e))
if "```json" in output.content:
json_string = output.content.split("```json")[1].strip()
else:
json_string = output.content
result = fix_JSON(json_string)
head, tail = os.path.split(pdf_path)
result["filename"] = tail
return result
def main():
documents = ['./data/docs/literature/Do people care about democracy_An experiment exploring the value of voting rights.pdf',
'./data/docs/literature/Expressive voting versus information avoidance_expenrimental evidence in the context of climate change mitigation.pdf',
'./data/docs/literature/Crashing the party_An experimental investigation of strategic voting in primary elections.pdf',
                 './data/docs/literature/Economic growth and political extremism.pdf']
doc = './data/docs/literature_suicide/1-s2.0-S0304387821000432-main.pdf'
doc = './data/docs/literature_suicide/1-s2.0-S0047272721000761-main.pdf'
# doc = './data/docs/literature_suicide/rest_a_00777.pdf'
documents = ['./data/docs/literature/Do people care about democracy_An experiment exploring the value of voting rights.pdf'
,'./data/docs/literature/Expressive voting versus information avoidance_expenrimental evidence in the context of climate change mitigation.pdf'
                 ,'./data/docs/literature/Economic growth and political extremism.pdf' ]
# './data/docs/literature/Expressive voting versus information avoidance_expenrimental evidence in the context of climate change mitigation.pdf',
# './data/docs/literature/Crashing the party_An experimental investigation of strategic voting in primary elections.pdf',
# './data/docs/literature/Economic growth andย political extremism.pdf']
# save_pdfs_to_db(documents, intro_excel_file, is_Intro=True, pages=4)
metadata = get_pdf_intro2(doc, 2)
print(metadata)
# docs = get_pdf_first_page_txt(doc, 3)
# # docs = get_pdf_pages(doc, 2)
# # docs = get_pdf_raw_pages(doc, 2)
# print(docs)
# pdf_first_page_txt = get_pdf_first_page_txt(doc, 3)
# raw_txt = get_pdf_raw_pages(fitz.open(doc), 2)
# print(raw_txt)
# pdf_first_page_txt = get_pdf_first_page_txt(doc, 3)
# output_file = "data/db/repo_intro_4.xlsx"
# intro354_excel_file = "data/db/repo_intro_35_16.xlsx"
# save_pdfs_to_db(documents, intro354_excel_file, is_intro=True, pages=4)
# intros = [dict["introduction"] for dict in get_metadata_from_db(intro35_excel_file)]
# polish = get_polish_intro('', intros[:3], 600, 0)
# print(polish)
# csv_file = "./data/db/summary.csv"
# column_name = "Theme"
# filter_value = "China authoritarian system"
# data = get_data_from_csv(csv_file, column_name, filter_value)
# print(data)
if __name__ == '__main__':
main() | [
"sample_introes",
"words_limit",
"format_instructions",
"\n I have extracted text from the initial pages of a Journal of Economic Literature (JEL) PDF file. I require assistance in extracting \n specific details, namely: article title, author, abstract and keywords section. Please be aware that if you encounter \n JEL classifications such as C12 and P34, kindly ignore them and refrain from including them in the abstract and keywords. \n \n {format_instructions}\n\n Wrap your final output as a json objects\n\n INPUT:\n {pdf_first_page_txt}\n\n YOUR RESPONSE:\n ",
"\nI have extracted the text from the initial pages of a Journal of Economic Literature (JEL) PDF file. I require assistance in extracting the introduction section. Typically, the document follows a pattern where the 'abstract' header is encountered, followed by the abstract section. Subsequently, an 'Introduction' header is expected, which is followed by the introduction section. Next, there may be a 'Background' header or other headers indicating different sections. The introduction section generally concludes before the next sub-title or section heading appears, such as 'Background' or other similar headings.\n\nPlease continue searching for the introduction section until you reach a clear next sub-title or section heading. However, please note that if you encounter a bottom part between two pages, such as a section starting with 'RECEIVED:' followed by a date, it does not necessarily mean that the introduction section has ended. In such cases, you should continue searching on the next page.\n\nIf the text 'www.elsevier.com' appears in the beginning, it indicates that the literature is published on Elsevier and follows a specific format. In this case, the abstract section will start with \"A B S T R A C T\" and end before the introduction section. The introduction section will typically start with \"1. Introduction\" and end before the next section header, such as \"2. Background\". Please continue searching for the introduction section until you reach next section heading such as \"2. Background\", it has to be started with \"2.\".\n\nPlease provide the introduction section as the final output in JSON format with the key 'Introduction' written in Pascal case.\n\nExclude the content of the abstract section.\n\nOnly include the text within the introduction section and exclude any text prior to it.\n\nINPUT: {pdf_first_page_txt}\n\nYOUR RESPONSE:\n ",
"\n I have extracted text from the initial pages of a Journal of Economic Literature (JEL) PDF file. \n I need help identifying the accepted date of the article. If the accepted date is not explicitly specified, \n it should be located either at the top or bottom of the first or second page of the article in a date format without the prefix 'accepted'. \n \n {format_instructions}\n\n Wrap your final output as a json objects\n\n INPUT:\n {pdf_first_page_txt}\n\n YOUR RESPONSE:\n ",
"pdf_first_page_txt",
"\n I have extracted text from the initial pages of a Journal of Economic Literature (JEL) PDF file. I require assistance in extracting \n introduction section. Typically, the introduction section begins after the abstract and ends before the next sub-title or section heading. \n \n Wrap your final output as a json objects\n\n INPUT:\n {pdf_first_page_txt}\n\n YOUR RESPONSE:\n ",
"my_intro",
"\n I require an introduction for my Journal of Economic Literature and I would appreciate it if you could compose it for me around {words_limit} words. I would like the introduction mimic on the sample introductions that I will provide. If I have already provided my own introduction, please refine it accordingly. \n\n % My own introduction: {my_intro}\n\n % Sample introductions:\n {sample_introes}\n % End of sample introductions:\n\n YOUR RESPONSE:\n "
] |
2024-01-10 | Kororinpas/Lit_Tool | pdf_documents.py | from pdf_metadata import get_pdf_metadata
from pdf_metadata_llm import get_pdf_metadata_using_llm
def get_pdf_documents(pdf_files):
from langchain.document_loaders import PyMuPDFLoader,DirectoryLoader,UnstructuredPDFLoader
docs =[]
import re
for pdf_fullpath in pdf_files:
metadata = get_pdf_metadata(pdf_fullpath)
if metadata != 'None':
doc = PyMuPDFLoader(pdf_fullpath).load()
for element in doc:
element.metadata = metadata
element.page_content = re.sub('\n+',' ',element.page_content.strip())
docs.append(element)
else:
doc = PyMuPDFLoader(pdf_fullpath).load()
print(f"{pdf_fullpath} is not identified! Using other strategy!!")
metadata = get_pdf_metadata_using_llm(doc)
if metadata != 'None':
for element in doc:
element.metadata = metadata
for element in doc:
element.page_content = re.sub('\n+',' ',element.page_content.strip())
docs.append(element)
return docs | [] |
2024-01-10 | Kororinpas/Lit_Tool | pdf_metadata_llm.py | from doi import get_doi
from document_util import get_split_documents
def get_pdf_metadata_using_llm(doc):
import re
doc[0].page_content = re.sub('\n+',' ',doc[0].page_content.strip())
# from langchain.text_splitter import RecursiveCharacterTextSplitter
# text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500,chunk_overlap = 50)
split_docs = get_split_documents(doc, 1500, 50)
abstract = split_docs[0]
doi = get_doi(abstract)
if doi != 'None':
import habanero
import time
citation = habanero.cn.content_negotiation(ids = doi,format='bibentry')
time.sleep(5)
import bibtexparser
citation = bibtexparser.loads(citation)
citation = citation.entries[0]
metadata = {'author':citation['author'],
'year':citation['year'],
'title':citation['title'],
'journal':citation['journal'],
}
return metadata
else:
metadata = 'None'
return metadata | [] |
2024-01-10 | Kororinpas/Lit_Tool | cosine_match.py | def search_cosine_similarity(query,split_docs,embeddings): ##query-str,split_docs-list,embeddings-embeddings()
split_docs_content = [content['content'] for content in split_docs]
embed_docs = embeddings.embed_documents(split_docs_content)
embed_query= embeddings.embed_query(query)
from openai.embeddings_utils import cosine_similarity
cos_index = []
for embed_doc in embed_docs:
cos_index.append(cosine_similarity(embed_doc,embed_query))
    # Build an index of positions ordered by similarity score
    idx = sorted(range(len(cos_index)), key=lambda i: cos_index[i])  # sort indices by ascending cosine similarity
final_similar_list = []
for index in idx[-3:]:
unit = {}
unit['sentences']=split_docs_content[index]
unit['source']=split_docs[index]['source']
unit['score']=cos_index[index]
final_similar_list.append(unit)
return final_similar_list | [] |
2024-01-10 | Kororinpas/Lit_Tool | embedding_function.py | def get_embedding_function():
from langchain.embeddings import HuggingFaceEmbeddings
import torch
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model_name = "sentence-transformers/all-mpnet-base-v2"
model_kwargs = {'device':device}
return HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs) | [] |
2024-01-10 | Kororinpas/Lit_Tool | doi.py | def get_doi(abstract):
from kor.extraction import create_extraction_chain
from kor.nodes import Object, Text, Number
from langchain.chat_models import ChatOpenAI
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0) # type: ignore
schema = Object(
id="doi",
description="doi is a digital identifier.It typically starts with 10. followed by a numeric prefix, such as 10.1000/182.",
attributes=[
Text(
id="doi",
description='doi is a digital identifier. It typically starts with "10." followed by a numeric prefix, such as 10.1000/182.',
examples=[
                    ('American Economic Journal: Economic Policy 2015, 7(4): 223–242 http://dx.doi.org/10.1257/pol.20130367 223 Water Pollution Progress at Borders: The','http://dx.doi.org/10.1257/pol.20130367'),
                    ('Environment and Development Economics (2020), 1–17 doi:10.1017/S1355770X2000025X EDE RESEARCH ARTICLE Political incentives, Party Congress, and pollution cycle: empirical evidence from China Zhihua Tian,1 and Yanfang Tian2* 1School of Economics, Zhejiang University of Technology, Hangzhou','10.1017/S1355770X2000025X')
],
many=True
)
],
many=False
)
chain = create_extraction_chain(llm, schema, encoder_or_encoder_class='json')
output = chain.predict_and_parse(text=abstract.page_content)
if 'doi' not in output['data']:
print(f"LLM strategy failed!!{abstract.metadata['source']} Please manually add it!!")
source = 'None'
return source
else:
doi = output['data']['doi']['doi'][0]
if 'doi=' in doi:
doi = doi.split('doi=')[1]
return doi
| [] |
2024-01-10 | jied-O/Jids-Garage | langchainagentstest.py | from langchain import OpenAI
from langchain.chains import LLMChain
from langchain.chains import PALChain
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.agents import load_tools
from ogbujipt.config import openai_emulation
from ogbujipt.model_style.alpaca import prep_instru_inputs, ALPACA_PROMPT_TMPL
from langchain.prompts import PromptTemplate
openai_emulation(host="http://192.168.0.73", port="8000")
def simpleWordPrompt():
prompt = PromptTemplate(
input_variables=["place"],
template="What is the capital of {place}?",
)
print(prompt.format(place="Nigeria"))
llm = OpenAI(temperature=0.1)
llmchain = LLMChain(llm=llm, prompt=prompt)
response = llmchain.run(place="Nigeria")
print(response)
def MathWorldProblem():
llm = OpenAI(temperature=0.1)
palchain = PALChain.from_math_prompt(llm=llm, verbose=True)
response = palchain.run(
"If my age is half of my dad's age and he is going to be 60 next year, what is my current age?"
)
print(response)
def agentTest():
llm = OpenAI(temperature=0)
tools = load_tools(["pal-math"], llm=llm)
agent = initialize_agent(tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True)
agent.run("If my age is half of my dad's age and he is going to be 60 next year, what is my current age?")
def main():
MathWorldProblem()
if __name__ == "__main__":
main() | [
"What is the capital of {place}?"
] |
2024-01-10 | tarunsamanta2k20/quivr | backend~parsers~audio.py | import os
import tempfile
import time
from io import BytesIO
from tempfile import NamedTemporaryFile
import openai
from fastapi import UploadFile
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from utils import compute_sha1_from_content, documents_vector_store
# # Create a function to transcribe audio using Whisper
# def _transcribe_audio(api_key, audio_file, stats_db):
# openai.api_key = api_key
# transcript = ""
# with BytesIO(audio_file.read()) as audio_bytes:
# # Get the extension of the uploaded file
# file_extension = os.path.splitext(audio_file.name)[-1]
# # Create a temporary file with the uploaded audio data and the correct extension
# with tempfile.NamedTemporaryFile(delete=True, suffix=file_extension) as temp_audio_file:
# temp_audio_file.write(audio_bytes.read())
# temp_audio_file.seek(0) # Move the file pointer to the beginning of the file
# transcript = openai.Audio.translate("whisper-1", temp_audio_file)
# return transcript
async def process_audio(upload_file: UploadFile, stats_db):
file_sha = ""
dateshort = time.strftime("%Y%m%d-%H%M%S")
file_meta_name = f"audiotranscript_{dateshort}.txt"
# uploaded file to file object
openai_api_key = os.environ.get("OPENAI_API_KEY")
# Here, we're writing the uploaded file to a temporary file, so we can use it with your existing code.
with tempfile.NamedTemporaryFile(delete=False, suffix=upload_file.filename) as tmp_file:
await upload_file.seek(0)
content = await upload_file.read()
tmp_file.write(content)
tmp_file.flush()
tmp_file.close()
with open(tmp_file.name, "rb") as audio_file:
transcript = openai.Audio.transcribe("whisper-1", audio_file)
file_sha = compute_sha1_from_content(transcript.text.encode("utf-8"))
file_size = len(transcript.text.encode("utf-8"))
# Load chunk size and overlap from sidebar
chunk_size = 500
chunk_overlap = 0
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=chunk_size, chunk_overlap=chunk_overlap)
texts = text_splitter.split_text(transcript)
docs_with_metadata = [Document(page_content=text, metadata={"file_sha1": file_sha, "file_size": file_size, "file_name": file_meta_name,
"chunk_size": chunk_size, "chunk_overlap": chunk_overlap, "date": dateshort}) for text in texts]
# if st.secrets.self_hosted == "false":
# add_usage(stats_db, "embedding", "audio", metadata={"file_name": file_meta_name,"file_type": ".txt", "chunk_size": chunk_size, "chunk_overlap": chunk_overlap})
documents_vector_store.add_documents(docs_with_metadata)
return documents_vector_store
| [] |
2024-01-10 | sshh12/llm_optimize | llm_optimize~optimize.py | from typing import Callable, Optional, Tuple, List
import re
import openai
from langchain.input import print_text
from langchain.prompts.chat import (
SystemMessage,
HumanMessage,
AIMessage,
)
from llm_optimize import llm, constants
# The numeric score and the LLM-facing representation
ScoreTuple = Tuple[float, str]
# Best score, history of scores, best x0
OptimizationResultTuple = Tuple[float, List[float], str]
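# Hypothetical example of a scoring function matching ScoreTuple (the scoring rule is invented):
# it maps a candidate solution string x to (numeric_score, llm_facing_feedback) for run() to maximize.
# def toy_func(x: str) -> ScoreTuple:
#     score = -abs(len(x) - 100)  # prefer candidates of roughly 100 characters
#     return score, f"Score: {score} (length {len(x)})"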
def run(
task_description: str,
task_question: str,
func: Callable[[str], ScoreTuple],
x0: str,
max_steps: Optional[int] = 10,
model: Optional[llm.LLMModel] = None,
verbose: Optional[bool] = True,
system_prompt: Optional[str] = constants.SYSTEM_PROMPT,
human_prompt: Optional[str] = constants.HUMAN_OPTIMIZATION_PROMPT,
stop_score: Optional[float] = None,
) -> OptimizationResultTuple:
if model is None:
model = llm.get_default_llm()
def _log(text: str, color: str):
if verbose:
print_text(text + "\n", color)
x = x0
score, fx = func(x)
best_score = score
best_x = x
_log(x, "blue")
_log(fx, "green")
messages = [
SystemMessage(content=system_prompt.format(task_description=task_description)),
HumanMessage(content=human_prompt.format(task_question=task_question, x=x, fx=fx)),
]
score_hist = [score]
for _ in range(max_steps):
try:
resp = model(messages).content
except openai.error.InvalidRequestError as e:
_log(str(e), "red")
# drop the first set of results to reduce token usage
messages.pop(1)
messages.pop(1)
resp = model(messages).content
_log(resp, "yellow")
try:
x = re.findall("```(?:\w+)?([\s\S]+)```", resp)[0]
except IndexError as e:
_log(f"Stopping early, failed to parse response. {e}", "red")
break
_log(x, "blue")
score, fx = func(x)
score_hist.append(score)
if score > best_score:
best_x = x
best_score = score
_log(fx, "green")
messages.append(AIMessage(content=resp))
messages.append(HumanMessage(content=human_prompt.format(task_question=task_question, x=x, fx=fx)))
if stop_score is not None and best_score >= stop_score:
break
return (best_score, score_hist, best_x)
| [] |
2024-01-10 | xiahan4956/Auto_Claude_100k | autogpt~llm~api_manager.py | from __future__ import annotations
from typing import List, Optional
import openai
from openai import Model
from autogpt.config import Config
from autogpt.llm.base import CompletionModelInfo, MessageDict
from autogpt.llm.providers.openai import OPEN_AI_MODELS
from autogpt.logs import logger
from autogpt.singleton import Singleton
class ApiManager(metaclass=Singleton):
def __init__(self):
self.total_prompt_tokens = 0
self.total_completion_tokens = 0
self.total_cost = 0
self.total_budget = 0
self.models: Optional[list[Model]] = None
def reset(self):
self.total_prompt_tokens = 0
self.total_completion_tokens = 0
self.total_cost = 0
self.total_budget = 0.0
self.models = None
def create_chat_completion(
self,
messages: list[MessageDict],
model: str | None = None,
temperature: float = None,
max_tokens: int | None = None,
deployment_id=None,
):
"""
Create a chat completion and update the cost.
Args:
messages (list): The list of messages to send to the API.
model (str): The model to use for the API call.
temperature (float): The temperature to use for the API call.
max_tokens (int): The maximum number of tokens for the API call.
Returns:
str: The AI's response.
"""
cfg = Config()
if temperature is None:
temperature = cfg.temperature
if deployment_id is not None:
response = openai.ChatCompletion.create(
deployment_id=deployment_id,
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
api_key=cfg.openai_api_key,
)
else:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
api_key=cfg.openai_api_key,
)
if not hasattr(response, "error"):
logger.debug(f"Response: {response}")
prompt_tokens = response.usage.prompt_tokens
completion_tokens = response.usage.completion_tokens
self.update_cost(prompt_tokens, completion_tokens, model)
return response
def update_cost(self, prompt_tokens, completion_tokens, model: str):
"""
Update the total cost, prompt tokens, and completion tokens.
Args:
prompt_tokens (int): The number of tokens used in the prompt.
completion_tokens (int): The number of tokens used in the completion.
model (str): The model used for the API call.
"""
# the .model property in API responses can contain version suffixes like -v2
model = model[:-3] if model.endswith("-v2") else model
model_info = OPEN_AI_MODELS[model]
self.total_prompt_tokens += prompt_tokens
self.total_completion_tokens += completion_tokens
self.total_cost += prompt_tokens * model_info.prompt_token_cost / 1000
if issubclass(type(model_info), CompletionModelInfo):
self.total_cost += (
completion_tokens * model_info.completion_token_cost / 1000
)
logger.debug(f"Total running cost: ${self.total_cost:.3f}")
def set_total_budget(self, total_budget):
"""
Sets the total user-defined budget for API calls.
Args:
total_budget (float): The total budget for API calls.
"""
self.total_budget = total_budget
def get_total_prompt_tokens(self):
"""
Get the total number of prompt tokens.
Returns:
int: The total number of prompt tokens.
"""
return self.total_prompt_tokens
def get_total_completion_tokens(self):
"""
Get the total number of completion tokens.
Returns:
int: The total number of completion tokens.
"""
return self.total_completion_tokens
def get_total_cost(self):
"""
Get the total cost of API calls.
Returns:
float: The total cost of API calls.
"""
return self.total_cost
def get_total_budget(self):
"""
Get the total user-defined budget for API calls.
Returns:
float: The total budget for API calls.
"""
return self.total_budget
def get_models(self) -> List[Model]:
"""
Get list of available GPT models.
Returns:
list: List of available GPT models.
"""
if self.models is None:
all_models = openai.Model.list()["data"]
self.models = [model for model in all_models if "gpt" in model["id"]]
return self.models
| [] |
2024-01-10 | xiahan4956/Auto_Claude_100k | autogpt~llm~utils~claude.py | from autogpt.config import Config
import time
import openai
import json
CFG = Config()
openai.api_key = CFG.openai_api_key
MAX_TOKEN_ONCE = 100000
CONTINUE_PROMPT = "... continue"
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
def _sendReq(anthropic, prompt, max_tokens_to_sample):
print("----------------request----------------")
print(prompt)
print("----------------request----------------\n")
print("the input words of claude: "+str(len(prompt)))
for _ in range(5):
try:
response = anthropic.completions.create(
prompt=prompt,
stop_sequences = [HUMAN_PROMPT, AI_PROMPT],
model="claude-2",
max_tokens_to_sample=max_tokens_to_sample,
temperature = 0.3
)
break
except Exception as e:
print(e)
time.sleep(1)
return response
def sendReq(question, max_tokens_to_sample: int = MAX_TOKEN_ONCE):
anthropic = Anthropic(api_key = CFG.claude_api_key)
prompt = f"{question} {anthropic.AI_PROMPT}"
response = _sendReq(anthropic, prompt, max_tokens_to_sample)
data = response.completion
return data
def pmt_gpt_to_claude(question):
question = str(question)[1:-1]
question = question.replace("{\'role\': \'system\', \'content\':","\n\nSYSTEM:")
question = question.replace("{\'role\': \'user\', \'content\':","\n\nHuman:")
question = question.replace("{\'role\': \'assistant\', \'content\':","\n\nAssistant:")
question = question.replace("\'}","")
return question
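# Hypothetical usage sketch (the messages below are invented): pmt_gpt_to_claude() rewrites an
# OpenAI-style message list into Claude's "\n\nHuman: ... \n\nAssistant:" prompt convention,
# which can then be passed to sendReq().
# gpt_messages = [{'role': 'system', 'content': 'You are a planner.'}, {'role': 'user', 'content': 'List two goals.'}]
# claude_prompt = pmt_gpt_to_claude(gpt_messages)
# reply = sendReq(claude_prompt, max_tokens_to_sample=1000)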
def fix_claude_json(claude_resp):
messages = [{"role":"system","content":r"1. You will receive a JSON string, and your task is to extract information from it and return it as a JSON object. 2.Use function's json schema to extrct.Please notice the format 3. Be aware that the given JSON may contain errors, so you may need to infer the fields and the format from the JSON string. 4.Do not use \" and \' .you should use ' " },{"role": "user", "content": claude_resp}]
functions = [
{
"name": "parse_claude_json",
"description": "parse a claude response to the json",
"parameters": {
"type": "object",
"properties": {
"thoughts": {
"type": "object",
"properties": {
"text": {
"type": "string",
"description": "thoughts"
},
"reasoning": {
"type": "string"
},
"plan": {
"type": "string",
"description": "it is a string,not list.If you find it is list,please use correct it "
},
"criticism": {
"type": "string",
"description": "constructive self-criticism"
},
"speak": {
"type": "string",
"description": "thoughts summary to say to user"
}
},
"required": ["text", "reasoning", "plan", "criticism", "speak"],
},
"command": {
"type": "object",
"properties": {
"name": {"type": "string"},
"args": {
"type": "object"
}
},
"required": ["name", "args"],
}
},
"required": ["thoughts", "command"],
},
},
]
resp_json = claude_resp
for _ in range(5):
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=messages,
functions=functions,
max_tokens=3000,
temperature=0.0,
)
resp_json = response["choices"][0]["message"]["function_call"]["arguments"]
break
except Exception as e:
time.sleep(1)
print(e)
# fix the plan
try:
resp_json = json.loads(resp_json)
resp_json["thoughts"]["plan"] = str(resp_json["thoughts"]["plan"]).replace("[","").replace("]","")
resp_json = json.dumps(resp_json)
except Exception as e:
print(e)
return resp_json
| [
"f\"{question} {anthropic.AI_PROMPT}",
"1. You will receive a JSON string, and your task is to extract information from it and return it as a JSON object. 2.Use function's json schema to extrct.Please notice the format 3. Be aware that the given JSON may contain errors, so you may need to infer the fields and the format from the JSON string. 4.Do not use \\\" and \\' .you should use ' ",
"... continue"
] |
2024-01-10 | pkrack/asp | asp~ppo_patched.py | import warnings
from typing import Any, Dict, Optional, Type, TypeVar, Union
import numpy as np
import torch as th
from gymnasium import spaces
from stable_baselines3.common.on_policy_algorithm import OnPolicyAlgorithm
from stable_baselines3.common.policies import ActorCriticCnnPolicy, ActorCriticPolicy, BasePolicy, MultiInputActorCriticPolicy
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import explained_variance, get_schedule_fn
from torch.nn import functional as F
SelfPPO = TypeVar("SelfPPO", bound="PPO")
class PPO(OnPolicyAlgorithm):
"""
Proximal Policy Optimization algorithm (PPO) (clip version)
Paper: https://arxiv.org/abs/1707.06347
Code: This implementation borrows code from OpenAI Spinning Up (https://github.com/openai/spinningup/)
https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail and
Stable Baselines (PPO2 from https://github.com/hill-a/stable-baselines)
Introduction to PPO: https://spinningup.openai.com/en/latest/algorithms/ppo.html
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: The learning rate, it can be a function
of the current progress remaining (from 1 to 0)
:param n_steps: The number of steps to run for each environment per update
(i.e. rollout buffer size is n_steps * n_envs where n_envs is number of environment copies running in parallel)
NOTE: n_steps * n_envs must be greater than 1 (because of the advantage normalization)
See https://github.com/pytorch/pytorch/issues/29372
:param batch_size: Minibatch size
    :param n_epochs: Number of epochs when optimizing the surrogate loss
:param gamma: Discount factor
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
:param clip_range: Clipping parameter, it can be a function of the current progress
remaining (from 1 to 0).
:param clip_range_vf: Clipping parameter for the value function,
it can be a function of the current progress remaining (from 1 to 0).
This is a parameter specific to the OpenAI implementation. If None is passed (default),
no clipping will be done on the value function.
IMPORTANT: this clipping depends on the reward scaling.
:param normalize_advantage: Whether to normalize or not the advantage
:param ent_coef: Entropy coefficient for the loss calculation
:param vf_coef: Value function coefficient for the loss calculation
:param max_grad_norm: The maximum value for the gradient clipping
:param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param target_kl: Limit the KL divergence between updates,
because the clipping is not enough to prevent large update
see issue #213 (cf https://github.com/hill-a/stable-baselines/issues/213)
By default, there is no limit on the kl div.
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for
debug messages
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
policy_aliases: Dict[str, Type[BasePolicy]] = {
"MlpPolicy": ActorCriticPolicy,
"CnnPolicy": ActorCriticCnnPolicy,
"MultiInputPolicy": MultiInputActorCriticPolicy,
}
def __init__(
self,
policy: Union[str, Type[ActorCriticPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 3e-4,
n_steps: int = 2048,
batch_size: int = 64,
n_epochs: int = 10,
gamma: float = 0.99,
gae_lambda: float = 0.95,
clip_range: Union[float, Schedule] = 0.2,
clip_range_vf: Union[None, float, Schedule] = None,
normalize_advantage: bool = True,
ent_coef: float = 0.0,
vf_coef: float = 0.5,
max_grad_norm: float = 0.5,
use_sde: bool = False,
sde_sample_freq: int = -1,
target_kl: Optional[float] = None,
tensorboard_log: Optional[str] = None,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super().__init__(
policy,
env,
learning_rate=learning_rate,
n_steps=n_steps,
gamma=gamma,
gae_lambda=gae_lambda,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
use_sde=use_sde,
sde_sample_freq=sde_sample_freq,
tensorboard_log=tensorboard_log,
policy_kwargs=policy_kwargs,
verbose=verbose,
device=device,
seed=seed,
_init_setup_model=False,
supported_action_spaces=(
spaces.Box,
spaces.Discrete,
spaces.MultiDiscrete,
spaces.MultiBinary,
),
)
# Sanity check, otherwise it will lead to noisy gradient and NaN
# because of the advantage normalization
if normalize_advantage:
assert (
batch_size > 1
), "`batch_size` must be greater than 1. See https://github.com/DLR-RM/stable-baselines3/issues/440"
if self.env is not None:
# Check that `n_steps * n_envs > 1` to avoid NaN
# when doing advantage normalization
buffer_size = self.env.num_envs * self.n_steps
assert buffer_size > 1 or (
not normalize_advantage
), f"`n_steps * n_envs` must be greater than 1. Currently n_steps={self.n_steps} and n_envs={self.env.num_envs}"
# Check that the rollout buffer size is a multiple of the mini-batch size
untruncated_batches = buffer_size // batch_size
if buffer_size % batch_size > 0:
warnings.warn(
f"You have specified a mini-batch size of {batch_size},"
f" but because the `RolloutBuffer` is of size `n_steps * n_envs = {buffer_size}`,"
f" after every {untruncated_batches} untruncated mini-batches,"
f" there will be a truncated mini-batch of size {buffer_size % batch_size}\n"
f"We recommend using a `batch_size` that is a factor of `n_steps * n_envs`.\n"
f"Info: (n_steps={self.n_steps} and n_envs={self.env.num_envs})"
)
self.batch_size = batch_size
self.n_epochs = n_epochs
self.clip_range = clip_range
self.clip_range_vf = clip_range_vf
self.normalize_advantage = normalize_advantage
self.target_kl = target_kl
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super()._setup_model()
# Initialize schedules for policy/value clipping
self.clip_range = get_schedule_fn(self.clip_range)
if self.clip_range_vf is not None:
if isinstance(self.clip_range_vf, (float, int)):
assert self.clip_range_vf > 0, "`clip_range_vf` must be positive, " "pass `None` to deactivate vf clipping"
self.clip_range_vf = get_schedule_fn(self.clip_range_vf)
def train(self) -> None:
"""
Update policy using the currently gathered rollout buffer.
"""
# Switch to train mode (this affects batch norm / dropout)
self.policy.set_training_mode(True)
# Update optimizer learning rate
self._update_learning_rate(self.policy.optimizer)
# Compute current clip range
clip_range = self.clip_range(self._current_progress_remaining)
# Optional: clip range for the value function
if self.clip_range_vf is not None:
clip_range_vf = self.clip_range_vf(self._current_progress_remaining)
entropy_losses = []
pg_losses, value_losses = [], []
clip_fractions = []
continue_training = True
loss = None
for epoch in range(self.n_epochs):
approx_kl_divs = []
# Do a complete pass on the rollout buffer
for rollout_data, bc_data in self.rollout_buffer.get(self.batch_size):
# Re-sample the noise matrix because the log_std has changed
if self.use_sde:
self.policy.reset_noise(self.batch_size)
if bc_data is None:
bc_loss = th.zeros(1, device=self.device)
else:
_, log_probs, _ = self.policy.evaluate_actions(bc_data.obs, bc_data.action)
ratio = th.exp(log_probs - bc_data.log_prob)
bc_loss = -th.mean(th.clamp(ratio, 1 - clip_range, 1 + clip_range)).to(self.device)
if rollout_data is not None:
actions = rollout_data.actions
if isinstance(self.action_space, spaces.Discrete):
# Convert discrete action from float to long
actions = rollout_data.actions.long().flatten()
values, log_prob, entropy = self.policy.evaluate_actions(rollout_data.observations, actions)
values = values.flatten()
# Normalize advantage
advantages = rollout_data.advantages
# Normalization does not make sense if mini batchsize == 1, see GH issue #325
if self.normalize_advantage and len(advantages) > 1:
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
# ratio between old and new policy, should be one at the first iteration
ratio = th.exp(log_prob - rollout_data.old_log_prob)
# clipped surrogate loss
policy_loss_1 = advantages * ratio
policy_loss_2 = advantages * th.clamp(ratio, 1 - clip_range, 1 + clip_range)
policy_loss = -th.min(policy_loss_1, policy_loss_2).mean()
# Logging
pg_losses.append(policy_loss.item())
clip_fraction = th.mean((th.abs(ratio - 1) > clip_range).float()).item()
clip_fractions.append(clip_fraction)
if self.clip_range_vf is None:
# No clipping
values_pred = values
else:
# Clip the difference between old and new value
# NOTE: this depends on the reward scaling
values_pred = rollout_data.old_values + th.clamp(
values - rollout_data.old_values, -clip_range_vf, clip_range_vf
)
# Value loss using the TD(gae_lambda) target
value_loss = F.mse_loss(rollout_data.returns, values_pred)
value_losses.append(value_loss.item())
# Entropy loss favor exploration
if entropy is None:
# Approximate entropy when no analytical form
entropy_loss = -th.mean(-log_prob)
else:
entropy_loss = -th.mean(entropy)
entropy_losses.append(entropy_loss.item())
# Calculate approximate form of reverse KL Divergence for early stopping
# see issue #417: https://github.com/DLR-RM/stable-baselines3/issues/417
# and discussion in PR #419: https://github.com/DLR-RM/stable-baselines3/pull/419
# and Schulman blog: http://joschu.net/blog/kl-approx.html
with th.no_grad():
log_ratio = log_prob - rollout_data.old_log_prob
approx_kl_div = th.mean((th.exp(log_ratio) - 1) - log_ratio).cpu().numpy()
approx_kl_divs.append(approx_kl_div)
if self.target_kl is not None and approx_kl_div > 1.5 * self.target_kl:
continue_training = False
if self.verbose >= 1:
print(f"Early stopping at step {epoch} due to reaching max kl: {approx_kl_div:.2f}")
break
loss = (policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss) + self.bc_coef * bc_loss
else:
loss = bc_loss
# Optimization step
self.policy.optimizer.zero_grad()
loss.backward()
# Clip grad norm
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.policy.optimizer.step()
if not continue_training:
break
if loss:
self._n_updates += self.n_epochs
explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten())
# Logs
if loss:
self.logger.record("train/entropy_loss", np.mean(entropy_losses))
self.logger.record("train/policy_gradient_loss", np.mean(pg_losses))
self.logger.record("train/value_loss", np.mean(value_losses))
self.logger.record("train/approx_kl", np.mean(approx_kl_divs))
self.logger.record("train/clip_fraction", np.mean(clip_fractions))
self.logger.record("train/loss", loss.item())
self.logger.record("train/explained_variance", explained_var)
if hasattr(self.policy, "log_std"):
self.logger.record("train/std", th.exp(self.policy.log_std).mean().item())
self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
self.logger.record("train/clip_range", clip_range)
if self.clip_range_vf is not None:
self.logger.record("train/clip_range_vf", clip_range_vf)
else:
self.logger.info("No valid goals in the batch, skipping update")
def learn(
self: SelfPPO,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 1,
tb_log_name: str = "PPO",
reset_num_timesteps: bool = True,
progress_bar: bool = False,
) -> SelfPPO:
return super().learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
tb_log_name=tb_log_name,
reset_num_timesteps=reset_num_timesteps,
progress_bar=progress_bar,
)
| [] |
2024-01-10 | jongio/chat-with-your-data-solution-accelerator | backend~utilities~orchestrator~Strategies.py | from enum import Enum
class OrchestrationStrategy(Enum):
OPENAI_FUNCTION = 'openai_function'
LANGCHAIN = 'langchain'
def get_orchestrator(orchestration_strategy: str):
if orchestration_strategy == OrchestrationStrategy.OPENAI_FUNCTION.value:
from .OpenAIFunctions import OpenAIFunctionsOrchestrator
return OpenAIFunctionsOrchestrator()
elif orchestration_strategy == OrchestrationStrategy.LANGCHAIN.value:
from .LangChainAgent import LangChainAgent
return LangChainAgent()
else:
raise Exception(f"Unknown orchestration strategy: {orchestration_strategy}")
| [] |
2024-01-10 | jongio/chat-with-your-data-solution-accelerator | backend~utilities~document_chunking~Layout.py | from typing import List
from .DocumentChunkingBase import DocumentChunkingBase
from langchain.text_splitter import MarkdownTextSplitter
from .Strategies import ChunkingSettings
from ..common.SourceDocument import SourceDocument
class LayoutDocumentChunking(DocumentChunkingBase):
def __init__(self) -> None:
pass
def chunk(self, documents: List[SourceDocument], chunking: ChunkingSettings) -> List[SourceDocument]:
full_document_content = "".join(list(map(lambda document: document.content, documents)))
document_url = documents[0].source
splitter = MarkdownTextSplitter.from_tiktoken_encoder(chunk_size=chunking.chunk_size, chunk_overlap=chunking.chunk_overlap)
chunked_content_list = splitter.split_text(full_document_content)
# Create document for each chunk
documents = []
chunk_offset = 0
for idx, chunked_content in enumerate(chunked_content_list):
documents.append(
SourceDocument.from_metadata(
content=chunked_content,
document_url=document_url,
metadata={"offset": chunk_offset},
idx=idx,
)
)
chunk_offset += len(chunked_content)
return documents
| [] |
2024-01-10 | jongio/chat-with-your-data-solution-accelerator | backend~utilities~helpers~LLMHelper.py | import openai
from typing import List
from langchain.chat_models import AzureChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from .EnvHelper import EnvHelper
class LLMHelper:
def __init__(self):
env_helper: EnvHelper = EnvHelper()
# Configure OpenAI API
openai.api_type = "azure"
openai.api_version = env_helper.AZURE_OPENAI_API_VERSION
openai.api_base = env_helper.OPENAI_API_BASE
openai.api_key = env_helper.OPENAI_API_KEY
self.llm_model = env_helper.AZURE_OPENAI_MODEL
self.llm_max_tokens = env_helper.AZURE_OPENAI_MAX_TOKENS if env_helper.AZURE_OPENAI_MAX_TOKENS != '' else None
self.embedding_model = env_helper.AZURE_OPENAI_EMBEDDING_MODEL
def get_llm(self):
return AzureChatOpenAI(deployment_name=self.llm_model, temperature=0, max_tokens=self.llm_max_tokens, openai_api_version=openai.api_version)
# TODO: This needs to have a custom callback to stream back to the UI
def get_streaming_llm(self):
return AzureChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler], deployment_name=self.llm_model, temperature=0,
max_tokens=self.llm_max_tokens, openai_api_version=openai.api_version)
def get_embedding_model(self):
return OpenAIEmbeddings(deployment=self.embedding_model, chunk_size=1)
def get_chat_completion_with_functions(self, messages: List[dict], functions: List[dict], function_call: str="auto"):
return openai.ChatCompletion.create(
deployment_id=self.llm_model,
messages=messages,
functions=functions,
function_call=function_call,
)
def get_chat_completion(self, messages: List[dict]):
return openai.ChatCompletion.create(
deployment_id=self.llm_model,
messages=messages,
)
| [] |
2024-01-10 | pcc2k00/HousingPriceTrend | HousingPriceTrendMetaphor.py | import openai
import yaml
from metaphor_python import Metaphor
with open("pass.yml") as f:
content = f.read()
my_credentials = yaml.load(content, Loader=yaml.FullLoader)
openai.api_key = my_credentials["openAi"]
metaphor = Metaphor(my_credentials["metaphor"])
USER_QUESTION = "Recent housing price in Seattle"
SYSTEM_MESSAGE = "You are a helpful assistant that generates search queiries based on user questions. Only generate one search query."
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": SYSTEM_MESSAGE},
{"role": "user", "content": USER_QUESTION},
],
)
query = completion.choices[0].message.content
search_response = metaphor.search(
query, use_autoprompt=True, start_published_date="2023-07-01"
)
contents_result = search_response.get_contents()
first_result = contents_result.contents[0]
SYSTEM_MESSAGE = "You are a helpful assistant that summarizes the content of a webpage. Summarize the users input."
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": SYSTEM_MESSAGE},
{"role": "user", "content": first_result.extract},
],
)
summary = completion.choices[0].message.content
print(f"Summary for {first_result.title}: {summary}")
| [] |
2024-01-10 | romain-cambonie/openxcom-mod-generator | src~chat~ask_for_visual_proposition.py | from openai import OpenAI
from openai.types.chat import ChatCompletion
def ask_for_concept_art(
client: OpenAI,
character_story: str,
art_style_description: str,
) -> str:
system_prompt = (
"Generate a comprehensive and vivid visual concept art of a character for a piece of artwork. "
"The character should fit within a distinct theme and style, and the description must be detailed enough to guide an "
"artist in creating a dynamic and engaging image."
"Here are the guidelines for your description:"
"Theme and Setting: Choose an intriguing theme and setting for the character. It could be anything from a dystopian "
"future to a fantasy world. "
"Describe the setting in a way that complements the character's story and personality."
"Character Details:"
"Physical Appearance: Provide a detailed description of the character's physical features, including hair, eyes, "
"skin, and build."
"Expression and Posture: Convey the character's mood or personality through their expression and posture."
"Attire and Equipment: Describe the character's clothing and any distinctive equipment they might carry, "
"do NOT use proper noun, describe visually what the items look like."
f"Artistic Style: Specify the desired artistic style for the portrayal. The starting point is : "
f"{art_style_description}, make sure to detail the stylistic elements that should be emphasized."
"Composition and Color Palette: Suggest a striking composition for the artwork"
"Describe the character stance"
"Describe the color palette, considering how colors can reflect the character's traits or the mood of the setting."
"Extract up to 8 keys focusing on the art style and composition"
"Use these guidelines to create a structured and detailed visual description for a character based on the following "
"origin story:"
"Focus on making the description as vivid and detailed as possible, so it can easily be translated into a stunning "
"piece of art."
""
"An example of a good concept art result:"
"Keys: Commanding presence, Dynamic composition, Low angle perspective, Cold metallic shades, Warm leather tones, "
"Dramatic lighting, Cyberpunk aesthetic"
"Character Details: She is light-skinned with a muscular build, short blonde hair, and piercing light-colored eyes "
"that radiate intelligence and cunning. Her expression is one of chilling neutrality, a reflection of her spirit "
"shaped by the cold, ruthless Arctic."
"Attire and Equipment: Her attire combines functionality with a touch of brutality โ a sleek, black chest armor that "
"bulges with the strength of her physique, complemented by large shoulder pads. Her arms are covered with highly "
"detailed armor, and her legs are clad in thigh-high boots with sturdy knee pads. Fortified gloves adorn her hands. "
"In one hand, she deftly holds a leather whip, an emblem of elegance and cruelty, while her other hand grips a robust "
"submachine gun. Around her waist are vials containing clear liquid and spherical objects reminiscent of primitive "
"grenades, adding to her enigmatic persona. A handle and a battle axe, symbols of her defiance and skill, "
"are fastened at her side."
"Setting: The backdrop is a post-apocalyptic Arctic tundra, subtly hinting at her origins. The environment should be "
"bleak yet captivating, with remnants of a once-thriving world now lost to chaos and rebellion."
"Artistic Style and Composition: The portrait should capture her commanding presence amidst this desolate backdrop. "
"The composition should be dynamic, focusing on her from a slightly low angle to emphasize her dominance. The color "
"palette should be a blend of cold metallic shades and warmer tones from her leather armor, creating a vivid contrast "
"that underscores her determination and grit. The lighting should be dramatic, highlighting her features and the "
"textures of her gear, enhancing the overall cyberpunk aesthetic."
)
user_prompt = f"Character story: {character_story}"
response: ChatCompletion = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
)
return str(response.choices[0].message.content)
| [
"Generate a comprehensive and vivid visual concept art of a character for a piece of artwork. The character should fit within a distinct theme and style, and the description must be detailed enough to guide an artist in creating a dynamic and engaging image.Here are the guidelines for your description:Theme and Setting: Choose an intriguing theme and setting for the character. It could be anything from a dystopian future to a fantasy world. Describe the setting in a way that complements the character's story and personality.Character Details:Physical Appearance: Provide a detailed description of the character's physical features, including hair, eyes, skin, and build.Expression and Posture: Convey the character's mood or personality through their expression and posture.Attire and Equipment: Describe the character's clothing and any distinctive equipment they might carry, do NOT use proper noun, describe visually what the items look like.Artistic Style: Specify the desired artistic style for the portrayal. The starting point is : PLACEHOLDER, make sure to detail the stylistic elements that should be emphasized.Composition and Color Palette: Suggest a striking composition for the artworkDescribe the character stanceDescribe the color palette, considering how colors can reflect the character's traits or the mood of the setting.Extract up to 8 keys focusing on the art style and compositionUse these guidelines to create a structured and detailed visual description for a character based on the following origin story:Focus on making the description as vivid and detailed as possible, so it can easily be translated into a stunning piece of art.An example of a good concept art result:Keys: Commanding presence, Dynamic composition, Low angle perspective, Cold metallic shades, Warm leather tones, Dramatic lighting, Cyberpunk aestheticCharacter Details: She is light-skinned with a muscular build, short blonde hair, and piercing light-colored eyes that radiate intelligence and cunning. Her expression is one of chilling neutrality, a reflection of her spirit shaped by the cold, ruthless Arctic.Attire and Equipment: Her attire combines functionality with a touch of brutality โ a sleek, black chest armor that bulges with the strength of her physique, complemented by large shoulder pads. Her arms are covered with highly detailed armor, and her legs are clad in thigh-high boots with sturdy knee pads. Fortified gloves adorn her hands. In one hand, she deftly holds a leather whip, an emblem of elegance and cruelty, while her other hand grips a robust submachine gun. Around her waist are vials containing clear liquid and spherical objects reminiscent of primitive grenades, adding to her enigmatic persona. A handle and a battle axe, symbols of her defiance and skill, are fastened at her side.Setting: The backdrop is a post-apocalyptic Arctic tundra, subtly hinting at her origins. The environment should be bleak yet captivating, with remnants of a once-thriving world now lost to chaos and rebellion.Artistic Style and Composition: The portrait should capture her commanding presence amidst this desolate backdrop. The composition should be dynamic, focusing on her from a slightly low angle to emphasize her dominance. The color palette should be a blend of cold metallic shades and warmer tones from her leather armor, creating a vivid contrast that underscores her determination and grit. The lighting should be dramatic, highlighting her features and the textures of her gear, enhancing the overall cyberpunk aesthetic.",
"Character story: PLACEHOLDER"
] |
2024-01-10 | romain-cambonie/openxcom-mod-generator | src~dalle~call_dalle_and_save_image.py | import requests
from openai import OpenAI
from pathlib import Path
from typing import Optional
from openai.types import ImagesResponse
def call_dalle_and_save_image(prompt: str, client: OpenAI, output_file_path: Path) -> Optional[Path]:
try:
# Generate image using OpenAI client
response: ImagesResponse = client.images.generate(
prompt=prompt, n=1, model="dall-e-3", size="1024x1024", quality="hd", response_format="url"
)
# Extract the image URL
image_url = response.data[0].url
if not image_url:
print("No image URL found in the response.")
return None
print(image_url)
# Download the image
image_response = requests.get(image_url)
if image_response.status_code == 200:
# Write the image data to a file
with open(output_file_path, "wb") as file:
file.write(image_response.content)
return output_file_path
else:
print(f"Error downloading image: {image_response.status_code}")
return None
except Exception as e:
print(f"An error occurred: {e}")
return None
| [] |
2024-01-10 | romain-cambonie/openxcom-mod-generator | src~chat~ask_for_dalle_character_prompt.py | from openai import OpenAI
from openai.types.chat import ChatCompletion
def ask_for_dalle_character_prompt(
client: OpenAI,
concept_art_description: str,
) -> str:
system_prompt = (
"You're given a detailed concept art description of a character. Your task is to condense this description into a "
"succinct, vivid DALL-E prompt."
"The DALL-E prompt should accurately capture the key visual elements and artistic style described in the concept art, "
"while being concise enough for effective image generation. "
"Here is the concept art description to be transformed into a DALL-E prompt:\n"
f"{concept_art_description}\n"
"Based on this description, refine this concept into a DALL-E prompt that contains, in order references to the art "
"style, composition, subject, location, colors;"
"The prompt must not be more than 130 words, encapsulating the essence of the concept art."
f"The prompt must start with the keys of the concept art"
)
user_prompt = "Transform the above concept art description into a succinct DALL-E prompt."
response: ChatCompletion = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
)
return str(response.choices[0].message.content)
| [
"Transform the above concept art description into a succinct DALL-E prompt.",
"You're given a detailed concept art description of a character. Your task is to condense this description into a succinct, vivid DALL-E prompt.The DALL-E prompt should accurately capture the key visual elements and artistic style described in the concept art, while being concise enough for effective image generation. Here is the concept art description to be transformed into a DALL-E prompt:\nPLACEHOLDER\nBased on this description, refine this concept into a DALL-E prompt that contains, in order references to the art style, composition, subject, location, colors;The prompt must not be more than 130 words, encapsulating the essence of the concept art.The prompt must start with the keys of the concept art"
] |
2024-01-10 | romain-cambonie/openxcom-mod-generator | src~chat~ask_for_origin_story.py | from openai import OpenAI
from openai.types.chat import ChatCompletion
def ask_for_origin_story(
client: OpenAI,
character_name: str,
equipment_description: str,
appearance_description: str,
) -> str:
system_prompt = (
"You are tasked with creating a short origin story for a fictional character. "
"You will receive three key pieces of information: (1) the character's name, "
"(2) a YAML payload detailing the character's equipment, and "
"(3) an image that shows some characteristics of the character's appearance. "
"Your job is to weave these elements together into a compelling and imaginative origin story. "
"The story should be concise, no more than a few paragraphs, and should creatively incorporate specific details from "
"the YAML payload and the visual cues from the image. "
"The tone and style of the story should align with the genre suggested by the character's name and appearance. "
"Be imaginative and ensure that the equipment and visual traits play a significant role in the character's background "
"and the events that shaped them."
"Pay special attention to match all visual description details such as gender, race, skin color, hair color and so on "
)
user_prompt = (
f"Character Name: {character_name}\n\nEquipment: {equipment_description}\n\nAppearance: "
f"{appearance_description}\n\nBased on the above details, create a short origin story for the character."
)
response: ChatCompletion = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
)
return str(response.choices[0].message.content)
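# Together with the sibling helpers in this repository (ask_for_concept_art,
# ask_for_dalle_character_prompt and call_dalle_and_save_image), a plausible
# end-to-end pipeline is sketched below; the variable names and output file
# are illustrative assumptions only.
#
#   client = OpenAI()
#   story = ask_for_origin_story(client, character_name, equipment_yaml, appearance_text)
#   concept = ask_for_concept_art(client, story, art_style_description)
#   dalle_prompt = ask_for_dalle_character_prompt(client, concept)
#   call_dalle_and_save_image(dalle_prompt, client, Path("character.png"))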
| [
"You are tasked with creating a short origin story for a fictional character. You will receive three key pieces of information: (1) the character's name, (2) a YAML payload detailing the character's equipment, and (3) an image that shows some characteristics of the character's appearance. Your job is to weave these elements together into a compelling and imaginative origin story. The story should be concise, no more than a few paragraphs, and should creatively incorporate specific details from the YAML payload and the visual cues from the image. The tone and style of the story should align with the genre suggested by the character's name and appearance. Be imaginative and ensure that the equipment and visual traits play a significant role in the character's background and the events that shaped them.Pay special attention to match all visual description details such as gender, race, skin color, hair color and so on ",
"Character Name: PLACEHOLDER\n\nEquipment: PLACEHOLDER\n\nAppearance: PLACEHOLDER\n\nBased on the above details, create a short origin story for the character."
] |
2024-01-10 | outlines-dev/outlines | outlines~models~__init__.py | """Module that contains all the models integrated in outlines.
We group the models in submodules by provider instead of theme (completion, chat
completion, diffusers, etc.) and use routing functions everywhere else in the
codebase.
"""
from .awq import awq
from .exllamav2 import exl2
from .gptq import gptq
from .llamacpp import LlamaCpp, llamacpp
from .mamba import Mamba, mamba
from .openai import OpenAI, openai
from .transformers import Transformer, transformers
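# A short usage sketch of the routing functions re-exported above; the model
# names are illustrative and any provider-specific arguments follow that
# provider's loader:
#
#   import outlines
#   local_model = outlines.models.transformers("gpt2")
#   hosted_model = outlines.models.openai("gpt-3.5-turbo")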
| [] |
2024-01-10 | ball2004244/Pinecone-Hackathon-23-Backend | logic~pinecone_db.py | '''
This file contains the logic for storing and querying data from Pinecone.
'''
from typing import List
from langchain.vectorstores import Pinecone
from langchain.chains.summarize import load_summarize_chain
from langchain.llms import GooglePalm
from langchain.embeddings.google_palm import GooglePalmEmbeddings
from langchain.schema import Document
import pinecone
from pinecone import DescribeIndexStatsResponse
class PineconeTrainer:
def __init__(self, gcp_api_key: str, pinecone_api_key: str, pinecone_environment: str):
self.gcp_api_key = gcp_api_key
self.pinecone_api_key = pinecone_api_key
self.pinecone_environment = pinecone_environment
self.palm_config = {
'temperature': 0.7,
'google_api_key': self.gcp_api_key,
}
self.index_name = 'paragraph-summarizer'
self.llm = GooglePalm(**self.palm_config)
self.chain = load_summarize_chain(self.llm, chain_type='stuff')
self.embeddings = GooglePalmEmbeddings(**self.palm_config)
self.pinecone_init(self.index_name, 'cosine', 768)
def pinecone_init(self, index_name: str, metric: str, dimension: int) -> None:
pinecone.init(
api_key=self.pinecone_api_key,
environment=self.pinecone_environment,
)
# check if index exists
if index_name not in pinecone.list_indexes():
pinecone.create_index(name=index_name, metric=metric, dimension=dimension)
self.index = pinecone.Index(index_name=index_name)
self.vectordb = Pinecone(index=self.index, embedding_function=self.embeddings.embed_query, text_key='text')
def add_data(self, input_list: List[str]=[]) -> None:
document_list = [Document(page_content=input_list[i]) for i in range(len(input_list))]
self.vectordb = Pinecone.from_documents(document_list, embedding=self.embeddings, index_name=self.index_name)
print('Data added successfully!, %s vectors added' % len(input_list))
def delete_all_data(self) -> None:
pass
def query(self, query: str=' ', question: str='Summarize in 3 sentences') -> str:
search = self.vectordb.similarity_search(query=query, k=3)
summary = self.chain.run(input_documents=search, question=question)
return summary
def get_index_info(self) -> DescribeIndexStatsResponse:
index = pinecone.GRPCIndex(self.index_name)
output = index.describe_index_stats()
return output
def embed_text(self, text: str) -> List[float]:
return self.embeddings.embed_query(text)
def pinecone_train(self, input_file: str) -> None:
try:
input_list = self.extract_input_text(input_file)
self.add_data(input_list)
except Exception as e:
print(e)
@staticmethod
def extract_input_text(input_file: str) -> List[str]:
from logic.data_extract import extract_data, extract_text
data = extract_data(input_file)
texts = extract_text(data)
return texts
@staticmethod
def extract_output_text(input_file: str) -> List[str]:
from logic.data_extract import extract_data, extract_output_text
data = extract_data(input_file)
texts = extract_output_text(data)
return texts
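# A possible usage sketch of this class (keys, environment and file path are placeholders):
#   trainer = PineconeTrainer(gcp_api_key="...", pinecone_api_key="...", pinecone_environment="...")
#   trainer.pinecone_train("data/input.json")
#   print(trainer.query(query="machine learning", question="Summarize in 3 sentences"))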
if __name__ == '__main__':
pass | [] |
2024-01-10 | TheoKanning/crossword | crossword~clues.py | import json
import os
import openai
def convert_raw_clues(raw_filename, output_filename):
"""
Reads raw clue info from raw_filename, formats it to match GPT-3's fine-tune input, and writes it to output_filename
Raw clues are formatted like "Up in the air : ALOFT"
"""
with open(raw_filename, "r") as f_in:
with open(output_filename, "w+") as f_out:
for line in f_in.readlines():
line = line.strip()
if not line:
continue
if line.isnumeric():
# This line is a clue number, ignore it
continue
if line.lower() == "down" or line.lower() == "across":
continue
components = line.rsplit(
":", 1
) # split from end to handle colons inside clues
if len(components) != 2:
print(line)
continue
clue = components[0].strip()
answer = components[1].strip()
f_out.write(
json.dumps(
{
"prompt": f"Answer: {answer.lower()}\nClue:",
"completion": f" {clue}\n",
}
)
)
f_out.write("\n")
def get_clue(answer):
prompt = f"Answer: {answer.lower()}\nClue:"
openai.api_key = os.getenv("OPENAI_API_KEY")
result = openai.Completion.create(
model="curie:ft-personal-2022-04-30-18-38-57", prompt=prompt, stop="\n", n=5
)
print(f"Answer: {answer}\nClues:")
for choice in result["choices"]:
print(choice["text"])
if __name__ == "__main__":
get_clue("")
# convert_raw_clues("../clues/raw_clues.txt", "../clues/formatted.jsonl")
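    # Worked example of the conversion performed by convert_raw_clues: the raw line
    #   Up in the air : ALOFT
    # becomes one JSONL fine-tune record:
    #   {"prompt": "Answer: aloft\nClue:", "completion": " Up in the air\n"}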
| [
"f\"Answer: {answer.lower()}\\nClue:"
] |
2024-01-10 | NusretOzates/langchain_retrieval_qa_bot | data_loaders.py | import re
from itertools import chain
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders import PyPDFLoader, TextLoader, UnstructuredURLLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import DocArrayInMemorySearch
from langchain.vectorstores.base import VectorStoreRetriever
def load_text_file(file_path: str) -> Document:
"""Loads a text file and returns a Document object.
Args:
file_path: Path to the text file.
Returns:
A Document object.
"""
doc = TextLoader(file_path, encoding="utf-8").load()[0]
return doc
def load_pdf_file(file_path: str) -> List[Document]:
"""Loads a pdf file and returns a list of Document objects.
Args:
file_path: Path to the pdf file.
Returns:
A list of Document objects. Every page in the pdf file is a Document object.
"""
loader = PyPDFLoader(file_path)
docs = loader.load()
return docs
def load_website(url: str) -> List[Document]:
"""Loads a website and returns a Document object.
Args:
url: Url of the website.
Returns:
A Document object.
"""
documents = UnstructuredURLLoader(
[url],
mode="elements",
headers={
"ssl_verify": "False",
},
).load()
processed_docs = []
# We are not rich, we need to eliminate some of the elements
for doc in documents:
# This will make us lose table information sorry about that :(
if doc.metadata.get("category") not in [
"NarrativeText",
"UncategorizedText",
"Title",
]:
continue
# Remove elements with empty links, they are mostly recommended articles etc.
if doc.metadata.get("links"):
link = doc.metadata["links"][0]["text"]
if link is None:
continue
link = link.replace(" ", "").replace("\n", "")
if len(link.split()) == 0:
continue
# Remove titles with links, they are mostly table of contents or navigation links
if doc.metadata.get("category") == "Title" and doc.metadata.get("links"):
continue
# Remove extra spaces
doc.page_content = re.sub(" +", " ", doc.page_content)
# Remove docs with less than 3 words
if len(doc.page_content.split()) < 3:
continue
processed_docs.append(doc)
# Instead of splitting element-wise, we merge all the elements and split them in chunks
merged_docs = "\n".join([doc.page_content for doc in processed_docs])
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
processed_docs = splitter.split_text(merged_docs)
processed_docs = [
Document(page_content=doc, metadata={"url": url}) for doc in processed_docs
]
return processed_docs
def load_text_files(file_paths: List[str]) -> List[Document]:
"""Loads a list of text files and returns a list of Document objects.
Args:
file_paths: List of paths to the text files.
Returns:
A list of Document objects.
"""
docs = [load_text_file(file_path) for file_path in file_paths]
return docs
def load_pdf_files(file_paths: List[str]) -> List[Document]:
"""Loads a list of pdf files and returns a list of Document objects.
Args:
file_paths: List of paths to the pdf files.
Returns:
A list of Document objects. Every page in the pdf file is a Document object.
"""
docs = [load_pdf_file(file_path) for file_path in file_paths]
docs = list(chain.from_iterable(docs))
return docs
def create_index(docs: List[Document]) -> VectorStoreRetriever:
"""Creates a vectorstore index from a list of Document objects.
Args:
docs: List of Document objects.
Returns:
A vectorstore index. It searches the most similar document to the given query but with
the help of MMR it also tries to find the most diverse document to the given query.
"""
index = VectorstoreIndexCreator(
vectorstore_cls=DocArrayInMemorySearch,
text_splitter=RecursiveCharacterTextSplitter(
chunk_size=1000, chunk_overlap=100
),
).from_documents(docs)
return index.vectorstore.as_retriever(search_type="mmr")
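# A minimal usage sketch combining the loaders above with the MMR retriever
# (the URL and file name are placeholders):
#
#   docs = load_website("https://example.com/article") + load_pdf_files(["manual.pdf"])
#   retriever = create_index(docs)
#   relevant_docs = retriever.get_relevant_documents("What does the manual say about setup?")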
| [] |
2024-01-10 | Antrozhuk/telegramChatGPTBot | src~telegram_bot.py | import telegram.constants as constants
from telegram import Update
from telegram.ext import ApplicationBuilder, ContextTypes, CommandHandler, MessageHandler, filters
from src.openai_helper import OpenAIHelper
from src.logger import Logger
class ChatGPT3TelegramBot:
"""
Class representing a Chat-GPT3 Telegram Bot.
"""
def __init__(self, config: dict, openai: OpenAIHelper):
"""
ะะฝัััะฐะปัะทัั ะฑะพั ะบะพะฝััะณััะฐัััั ัะฐ GPT-3 ะฝะฐะปะฐัััะฒะฐะฝะฝัะผะธ.
:param config: ะกะปะพะฒะฝะธะบ ะท ะบะพะฝััะณััะฐัััั ะฑะพัะฐ
:param openai: OpenAIHelper ะพะฑสผัะบั
:param disallowed_message: ะะพะฒัะดะพะผะปะตะฝะฝั ะฟัะพ ะฒัะดัััะฝัััั ะดะพัััะฟั
"""
self.config = config
self.openai = openai
self.logger = Logger('telegram_bot').get_logger()
        self.disallowed_message = "Sorry, but you are not allowed to use this bot."
    async def start(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
        """
        Shows the start message.
        """
if await self.disallowed(update, context):
return
        await update.message.reply_text("Hi! I'm a bot that answers your messages using ChatGPT-3.\n"
                                        "If you want to learn more about me, type /help\n\n",
                                        disable_web_page_preview=True)
    async def help(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
        """
        Shows the help message.
        """
if await self.disallowed(update, context):
return
        await update.message.reply_text("[Any message] - Sends your message to the AI\n"
                                  "/help - Help menu\n"
                                  "/random_answer - Generates a random answer\n"
                                  "/random_post - Generates a random post\n"
                                  "/reset - Resets the conversation\n\n",
disable_web_page_preview=True)
    async def reset(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
        """
        Resets the conversation.
        """
if await self.disallowed(update, context):
return
self.logger.info(f'Resetting the conversation for {update.message.from_user}...')
chat_id = update.effective_chat.id
self.openai.reset_chat_history(chat_id=chat_id)
        await context.bot.send_message(chat_id=chat_id, text='Done!')
async def prompt(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
React to incoming messages and respond accordingly.
"""
if await self.disallowed(update, context):
return
self.logger.info(f'New message "{update.message.text}" received from {update.message.from_user}')
chat_id = update.effective_chat.id
await context.bot.send_chat_action(chat_id=chat_id, action=constants.ChatAction.TYPING)
response = self.openai.get_chat_response(chat_id=chat_id, query=update.message.text)
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.id,
parse_mode=constants.ParseMode.MARKDOWN,
text=response
)
    async def random_answer(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
        """
        Sends a random answer.
        """
if await self.disallowed(update, context):
return
self.logger.info(f'random_answer command received from {update.message.from_user}')
chat_id = update.effective_chat.id
await context.bot.send_chat_action(chat_id=chat_id, action=constants.ChatAction.TYPING)
        response = self.openai.get_chat_response(chat_id=chat_id, query='write a random answer')
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.id,
parse_mode=constants.ParseMode.MARKDOWN,
text=response
)
    async def random_post(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
        """
        Sends a random post.
        """
if await self.disallowed(update, context):
return
self.logger.info(f'random_post command received from {update.message.from_user}')
chat_id = update.effective_chat.id
await context.bot.send_chat_action(chat_id=chat_id, action=constants.ChatAction.TYPING)
        response = self.openai.get_chat_response(chat_id=chat_id, query='write a random post in Ukrainian')
await context.bot.send_message(
chat_id=chat_id,
parse_mode=constants.ParseMode.MARKDOWN,
text=response
)
    async def disallowed(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
        """
        Sends a message telling the user that they are not allowed to use the bot.
        """
if not await self.is_allowed(update):
self.logger.warning(f'User {update.message.from_user} is not allowed to use the bot')
await context.bot.send_message(
chat_id=update.effective_chat.id,
text=self.disallowed_message,
disable_web_page_preview=True
)
return True
return False
    async def error_handler(self, update: object, context: ContextTypes.DEFAULT_TYPE) -> None:
        """
        Catches all errors.
        """
self.logger.debug(f'Exception while handling an update: {context.error}')
    async def is_allowed(self, update: Update) -> bool:
        """
        Checks whether the user is allowed to use this bot.
        """
if self.config['allowed_user_ids'] == '*':
return True
allowed_user_ids = self.config['allowed_user_ids'].split(',')
if str(update.message.from_user.id) in allowed_user_ids:
return True
return False
    def run(self):
        """
        Runs the bot until the user presses Ctrl+C.
        """
application = ApplicationBuilder().token(self.config['token']).build()
application.add_handler(CommandHandler('start', self.start))
application.add_handler(CommandHandler('help', self.help))
application.add_handler(CommandHandler('reset', self.reset))
application.add_handler(CommandHandler('random_answer', self.random_answer))
application.add_handler(CommandHandler('random_post', self.random_post))
application.add_handler(MessageHandler(filters.TEXT & (~filters.COMMAND), self.prompt))
application.add_error_handler(self.error_handler)
application.run_polling()
| [] |
2024-01-10 | aws-samples/aurora-postgresql-pgvector | DAT303~02_QuestionAndAnswering~rag_app.py | # Import libraries
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain.vectorstores.pgvector import PGVector
from langchain.memory import ConversationSummaryBufferMemory
from langchain.chains import ConversationalRetrievalChain
from htmlTemplates import css
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import BedrockEmbeddings
from langchain.llms import Bedrock
from langchain.prompts import PromptTemplate
import streamlit as st
import boto3
from PIL import Image
import os
import traceback
# TODO: This function takes a list of PDF documents as input and extracts the text from them using PdfReader.
# It concatenates the extracted text and returns it.
# TODO: Given the extracted text, this function splits it into smaller chunks using the RecursiveCharacterTextSplitter module.
# The chunk size, overlap, and other parameters are configured to optimize processing efficiency.
# TODO: This function takes the text chunks as input and creates a vector store using Bedrock Embeddings (Titan) and pgvector.
# The vector store stores the vector representations of the text chunks, enabling efficient retrieval based on semantic similarity.
# TODO: In this function, a conversation chain is created using the conversational AI model (Anthropic's Claude v2), vector store (created in the previous function), and conversation memory (ConversationSummaryBufferMemory).
# This chain allows the Gen AI app to engage in conversational interactions.
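# The four functions described in the TODOs above are called further down (in the
# "Process" button handler and in main()) but are not defined in this listing.
# A minimal sketch follows; the chunk sizes, Bedrock model ids and the PGVector
# wiring are assumptions for illustration, not the original implementation.
def get_pdf_text(pdf_docs):
    # Concatenate the extracted text of every page of every uploaded PDF
    text = ""
    for pdf in pdf_docs or []:
        reader = PdfReader(pdf)
        for page in reader.pages:
            text += page.extract_text() or ""
    return text

def get_text_chunks(text):
    # Split the raw text into overlapping chunks to keep embedding and retrieval efficient
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    return splitter.split_text(text)

def get_vectorstore(text_chunks):
    # Store chunk embeddings (Bedrock Titan) in Aurora PostgreSQL via pgvector
    embeddings = BedrockEmbeddings(client=BEDROCK_CLIENT)
    if not text_chunks:
        # Nothing processed yet: connect to the existing collection
        return PGVector(connection_string=CONNECTION_STRING, embedding_function=embeddings)
    return PGVector.from_texts(texts=text_chunks, embedding=embeddings, connection_string=CONNECTION_STRING)

def get_conversation_chain(vectorstore):
    # Conversational retrieval chain backed by Anthropic Claude v2 on Bedrock
    llm = Bedrock(model_id="anthropic.claude-v2", client=BEDROCK_CLIENT)
    memory = ConversationSummaryBufferMemory(llm=llm, memory_key="chat_history", return_messages=True)
    return ConversationalRetrievalChain.from_llm(llm=llm, retriever=vectorstore.as_retriever(), memory=memory)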
# This function is responsible for processing the user's input question and generating a response from the chatbot
def handle_userinput(user_question):
if "chat_history" not in st.session_state:
st.session_state.chat_history = None
if "messages" not in st.session_state:
st.session_state.messages = []
try:
response = st.session_state.conversation({'question': user_question})
except ValueError:
st.write("Sorry, I didn't understand that. Could you rephrase your question?")
print(traceback.format_exc())
return
st.session_state.chat_history = response['chat_history']
for i, message in enumerate(st.session_state.chat_history):
if i % 2 == 0:
st.success(message.content, icon="๐ค")
else:
st.write(message.content)
# Streamlit components
def main():
# Set the page configuration for the Streamlit application, including the page title and icon.
st.set_page_config(page_title="Generative AI Q&A with Amazon Bedrock, Aurora PostgreSQL and pgvector",
layout="wide",
page_icon=":books::parrot:")
st.write(css, unsafe_allow_html=True)
logo_url = "static/Powered-By_logo-stack_RGB_REV.png"
st.sidebar.image(logo_url, width=150)
st.sidebar.markdown(
"""
### Instructions:
1. Browse and upload PDF files
2. Click Process
3. Type your question in the search bar to get more insights
"""
)
# Check if the conversation and chat history are not present in the session state and initialize them to None.
if "conversation" not in st.session_state:
st.session_state.conversation = get_conversation_chain(get_vectorstore(None))
if "chat_history" not in st.session_state:
st.session_state.chat_history = None
# A header with the text appears at the top of the Streamlit application.
st.header("Generative AI Q&A with Amazon Bedrock, Aurora PostgreSQL and pgvector :books::parrot:")
subheader = '<p style="font-family:Calibri (Body); color:Grey; font-size: 16px;">Leverage Foundational Models from <a href="https://aws.amazon.com/bedrock/">Amazon Bedrock</a> and <a href="https://github.com/pgvector/pgvector">pgvector</a> as Vector Engine</p>'
# Write the CSS style to the Streamlit application, allowing you to customize the appearance.
st.markdown(subheader, unsafe_allow_html=True)
image = Image.open("static/RAG_APG.png")
st.image(image, caption='Generative AI Q&A with Amazon Bedrock, Aurora PostgreSQL and pgvector')
# Create a text input box where you can ask questions about your documents.
user_question = st.text_input("Ask a question about your documents:", placeholder="What is Amazon Aurora?")
# Define a Go button for user action
go_button = st.button("Submit", type="secondary")
# If the go button is pressed or the user enters a question, it calls the handle_userinput() function to process the user's input.
if go_button or user_question:
with st.spinner("Processing..."):
handle_userinput(user_question)
with st.sidebar:
st.subheader("Your documents")
pdf_docs = st.file_uploader(
"Upload your PDFs here and click on 'Process'", type="pdf", accept_multiple_files=True)
# If the user clicks the "Process" button, the following code is executed:
# i. raw_text = get_pdf_text(pdf_docs): retrieves the text content from the uploaded PDF documents.
# ii. text_chunks = get_text_chunks(raw_text): splits the text content into smaller chunks for efficient processing.
# iii. vectorstore = get_vectorstore(text_chunks): creates a vector store that stores the vector representations of the text chunks.
if st.button("Process"):
with st.spinner("Processing"):
# get pdf text
raw_text = get_pdf_text(pdf_docs)
# get the text chunks
text_chunks = get_text_chunks(raw_text)
# create vector store
vectorstore = get_vectorstore(text_chunks)
# create conversation chain
st.session_state.conversation = get_conversation_chain(vectorstore)
                    st.success('PDF uploaded successfully!', icon="✅")
with st.sidebar:
st.divider()
st.sidebar.markdown(
"""
### Sample questions to get started:
1. What is Amazon Aurora?
2. How can I migrate from PostgreSQL to Aurora and the other way around?
3. What does "three times the performance of PostgreSQL" mean?
4. What is Aurora Standard and Aurora I/O-Optimized?
5. How do I scale the compute resources associated with my Amazon Aurora DB Instance?
6. How does Amazon Aurora improve my databases fault tolerance to disk failures?
7. How does Aurora improve recovery time after a database crash?
8. How can I improve upon the availability of a single Amazon Aurora database?
"""
)
if __name__ == '__main__':
# This function loads the environment variables from a .env file.
load_dotenv()
# Define the Bedrock client.
BEDROCK_CLIENT = boto3.client("bedrock-runtime", 'us-west-2')
# Create the connection string for pgvector from .env file.
CONNECTION_STRING = PGVector.connection_string_from_db_params(
driver = os.environ.get("PGVECTOR_DRIVER"),
user = os.environ.get("PGVECTOR_USER"),
password = os.environ.get("PGVECTOR_PASSWORD"),
host = os.environ.get("PGVECTOR_HOST"),
port = os.environ.get("PGVECTOR_PORT"),
database = os.environ.get("PGVECTOR_DATABASE")
)
main()
| [] |
2024-01-10 | WuQingYi20/InteractiveStory | wsgi.py | from flask import Flask, render_template, jsonify, request
import openai
import re
from prompts import prompts
from dotenv import load_dotenv
import os
# Load the .env file
load_dotenv()
app = Flask(__name__)
initialCall = True
currentDescription = ""
# Initialize OpenAI API with your API key
openai.api_key = os.getenv('OPENAI_API_KEY')
# Define a dictionary to store user progress data
user_data = {}
# Global variable to track initialization status
initialized = False
@app.route('/')
def index():
global initialized
global currentDescription
if initialized:
# Initialization has already been done, return JSON response
if request.headers.get('X-Requested-With') == 'XMLHttpRequest':
return jsonify(story=user_data['story'], choices=user_data['choices'])
# Initialization has already been done, return HTML response
else:
return render_template('index.html', story=user_data['story'], choices=user_data['choices'])
else:
# Initialization code
systemRoleAuto = prompts['index']['System']
promptStory = prompts['index']['story']
storyResponse = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": f"{systemRoleAuto}"},
{"role": "user", "content": f"{promptStory}"},
#{"role": "assistant", "content": f"{contentAssistant}"},
],
max_tokens= 1500,
)
story = storyResponse.choices[0].message['content']
currentDescription = story
choicesPrompt = prompts['index']['choices']
choiceResponse = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": f"{systemRoleAuto}"},
{"role": "user", "content": f"{story} {choicesPrompt}"},
#{"role": "assistant", "content": f"{contentAssistant}"},
],
max_tokens= 1500,
)
#Insert <p> tags around each paragraph
formatted_story = format_story(story)
user_data['story'] = formatted_story
user_data['choices'] = choiceResponse.choices[0].message['content']
initialized = True
if request.headers.get('X-Requested-With') == 'XMLHttpRequest':
return jsonify(story=story, choices=user_data['choices'])
else:
return render_template('index.html', story=story, choices=user_data['choices'])
# Define a route to handle user choices and update the story
@app.route('/next-page/<choice>')
def next_page(choice):
systemRoleAuto = prompts['next-page']['System']
originalStory = user_data['story'] + "\n" + choice
contentAssistant = prompts['next-page']['storyAssistant']
contentAssistantChoices = prompts['next-page']['choicesAssistant']
prompt_story = originalStory + "\n" + prompts['next-page']['story']
response_story = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": f"{systemRoleAuto}"},
{"role": "user", "content": f"{prompt_story}"},
{"role": "assistant", "content": f"{contentAssistant}"},
],
max_tokens= 1500,
)
prompt_choices = originalStory + response_story.choices[0].message['content'] + "\n" + prompts['next-page']['choices']
response_choices = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": f"{systemRoleAuto}"},
{"role": "user", "content": f"{prompt_choices}"},
{"role": "assistant", "content": f"{contentAssistantChoices}"},
],
max_tokens= 1500,
)
story = response_story.choices[0].message['content']
choices = response_choices.choices[0].message['content']
# get summary of previous story and actions by gpt-3.5-turbo and original story
prompt_summary = originalStory + "\n" + prompts['next-page']['summary']
response_summary = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": f"{systemRoleAuto}"},
{"role": "user", "content": f"{prompt_summary}"},
#{"role": "assistant", "content": f"{contentAssistant}"},
],
max_tokens= 1500,
)
formatted_story = format_story(story)
user_data['story'] = formatted_story
user_data['choices'] = choices
user_data['summary'] = response_summary.choices[0].message['content']
return jsonify(story=formatted_story, choices=choices, summary=user_data['summary'])
def format_story(story):
# Split the text into paragraphs using a regular expression
paragraphs = re.split(r"\n\s*\n", story)
#Insert <p> tags around each paragraph
formatted_story = "\n".join([f"<p>{paragraph}</p>" for paragraph in paragraphs])
return formatted_story
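# A quick worked example of the formatting above:
#   format_story("Once upon a time...\n\nThe hero woke up.")
# returns "<p>Once upon a time...</p>\n<p>The hero woke up.</p>".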
if __name__ == '__main__':
app.run(debug=True)
| [
"\n",
"PLACEHOLDER PLACEHOLDER",
"PLACEHOLDER",
"originalStory + \"\\n\" + prompts['next-page']['story']",
"next-page",
"originalStory + \"\\n\" + prompts['next-page']['summary']",
"originalStory + response_story.choices[0].message['content'] + \"\\n\" + prompts['next-page']['choices']",
"content",
"index"
] |
2024-01-10 | yamdereneko/ymbot | src~chatGPT~Chat_GPT_API.py | # -*- coding: utf-8 -*-
import asyncio
import nonebot
from pydantic import BaseModel
from httpx import AsyncClient
import src.Data.jx3_Redis as redis
import openai
class Response(BaseModel):
    """Return data model"""
    id: str
    """Status code"""
object: str
created: int
model: str
    choices: list
    """Returned message string"""
    usage: dict | list[dict]
    """Returned data"""
class ChatGPTAPI:
client: AsyncClient
def __init__(self):
proxy_url = "http://username:password@127.0.0.1:8888"
proxies = {"http": proxy_url, "https": proxy_url}
self.client = AsyncClient(proxies=proxies)
        self.url = "https://api.openai.com/v1/chat/completions"
async def call_api(self, content) -> Response:
red = redis.Redis()
chat_gpt_apikey = await red.query("chat_gpt_apikey")
        Organization = await red.query("OpenAI-Organization")
        """Request data from the API site"""
headers = {
'Authorization': f'Bearer {chat_gpt_apikey}',
'OpenAI-Organization': Organization,
'Content-Type': 'application/json'
}
data = {
"model": "gpt-3.5-turbo",
"messages": [{"role": "user", "content": content}]
}
res = await self.client.post(url=self.url, json=data, headers=headers, timeout=3000)
print(res)
nonebot.logger.info(res.text)
return Response.parse_obj(res.json()) | [] |
2024-01-10 | kaistAI/SelFee | data_augmentation~call_openai_multiprocessing_sharegpt.py | from concurrent.futures import ProcessPoolExecutor
import argparse
import multiprocessing
import openai
from time import sleep
from random import random
import nltk
nltk.download('punkt')
from nltk import tokenize
import json
import fcntl
from typing import List
import os
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
RetryError
) # for exponential backoff
API_KEYS = os.environ["OPENAI_API_KEYS"].split(",")
MAX_WAIT_TIME=1
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--input-path", type=str, required=True)
parser.add_argument("--output-path", type=str, required=True)
parser.add_argument("--fail-path", type=str, required=True)
parser.add_argument("--requests-per-minute", type=int, default=60, help="Number of requests per minute per API key")
parser.add_argument("--input-streaming", action="store_true")
parser.add_argument("--limit", type=int, default=None)
args = parser.parse_args()
if not args.input_streaming:
no_streaming(args.input_path, args.output_path, args.fail_path, args.requests_per_minute, args.limit)
else:
streaming(args.input_path, args.output_path, args.fail_path, args.requests_per_minute, args.limit)
def no_streaming(input_path, output_path, fail_path, requests_per_minute, limit):
input_items = []
input_format = input_path.split(".")[-1]
with open(input_path, "r") as input_file:
if input_format == "jsonl":
for line in input_file:
input_items.append(json.loads(line))
elif input_format == "json":
input_items = json.load(input_file)
else:
raise ValueError(f"Unknown input format: {input_format}")
if limit is not None:
input_items = input_items[:limit]
process(input_items, output_path, fail_path, requests_per_minute)
def streaming(input_path, output_path, fail_path, requests_per_minute, limit):
last_modified = None
last_items = None
while True:
stats = os.stat(input_path)
modified = stats.st_mtime
if last_modified is not None and last_modified == modified:
sleep(1)
continue
input_items = []
input_format = input_path.split(".")[-1]
with open(input_path, "r") as input_file:
if input_format == "jsonl":
for line in input_file:
input_items.append(json.loads(line))
elif input_format == "json":
input_items = json.load(input_file)
else:
raise ValueError(f"Unknown input format: {input_format}")
if limit is not None:
input_items = input_items[:limit]
if last_items is not None:
added_items = [item for item in input_items if item not in last_items]
else:
added_items = input_items
process(added_items, output_path, fail_path, requests_per_minute)
last_modified = modified
def process(input_items, output_path, fail_path, requests_per_minute):
num_api_keys = len(API_KEYS)
requests_per_minute_total = requests_per_minute * num_api_keys
with ProcessPoolExecutor(max_workers=num_api_keys * requests_per_minute) as executor:
for item_index, item in enumerate(input_items):
api_key = API_KEYS[item_index % num_api_keys]
executor.submit(call_api_and_save, api_key=api_key, item=item, output_path=output_path, fail_path=fail_path)
sleep(1 / requests_per_minute_total * 60)
def call_api_and_save(api_key: str, item: dict, output_path: str, fail_path: str):
try:
output_item = call_api(api_key, item)
success = True
except Exception as e:
success = False
if success:
output_line = json.dumps(output_item)
with open(output_path, "a") as output_file:
fcntl.flock(output_file, fcntl.LOCK_EX)
output_file.write(output_line + "\n")
fcntl.flock(output_file, fcntl.LOCK_UN)
else:
fail_line = json.dumps(item)
with open(fail_path, "a") as fail_file:
fcntl.flock(fail_file, fcntl.LOCK_EX)
fail_file.write(fail_line + "\n")
fcntl.flock(fail_file, fcntl.LOCK_UN)
def call_api(api_key: str, task: str):
openai.api_key = api_key
last_flag = False
item = {}
item["iteration_truncated"]=False
print('Testing %s ...' % task)
# print('testing index %d-------------------------------------------------------------------------------------'%index)
instruction = task["instruction"]
input = task["input"]
item['instruction']=instruction
item['input']=input
answer = task["output"]
outputs=[]
message=[]
if input == "":
context = f"{instruction}\n"
else:
context = f"{instruction}\n{input}\n"
#0
message.append({"role": "user", "content": f"{context}"})
#3 7
# message.append({"role": "assistant", "content": answer})
message=[{"role": "user", "content": f"{context}Here is a proposed answer:\n{answer}\n\nAre there any comments or critiques for the above answer? If so, write one under 100 words. You may score the quality of the answer on the scale of 1-10 (1: no code/no sense; 10: perfect) Also, classify if revision is needed by responding \"Revision is needed\" or \"Revision is not needed\" at the end. Normally, score of less than 9 should be revised.\n\nCritique:"}]
# Describe possible feedbacks that can improve the answer. The feedback should be under 100 words. If there is nothing to improve, just say \"No Feedback\".Feedback:"})
# source ="Instruction: " + task["instruction"] + '\Answer: ' + output + "\n\nAre there any comments or feedbacks for the above answer? If so, write one within 100 words. Also, classify if revision is needed by responding \"Revision is needed\" or \"Revision is not needed\" at the end.\n\Feedback1:"
# print(source)
# message.append({"role": "user", "content": instruction+":\n"+input})
# message.append({"role":"user","content":"Are there any comments or critiques for the above answer? If so, write one within 100 words. If not, just say \"Revision is not needed\"."})
feedback = completion_with_backoff(
model="gpt-3.5-turbo",
messages = message,
temperature=1.0,
max_tokens=128,
top_p=1,
frequency_penalty=0,
presence_penalty=0
# stop=["\n\n"]
)
feedback = feedback['choices'][0]['message']['content']
#1: 0
message.append({"role": "assistant", "content": feedback})
outputs.append({"output":answer, "feedback":feedback})
print("feedback:", feedback)
iteration=1
while("no critique" not in message[-1]["content"].lower()) and ("no revision" not in message[-1]["content"].lower()) and ("no need" not in message[-1]["content"].lower()) and ("not needed" not in message[-1]["content"].lower()):
if iteration>=5:
item["iteration_truncated"]=True
break
#2 6
if last_flag == False:
message.append({"role":"user","content":"Revise the answer based on your own critique within 500 words. Your revision should be simple and clear, so do not add any rhetorics such as apology for the past mistake. Write as if the revised answer is the first try.\nRevision:"})
else:
message.append({"role":"user","content":"Revise the answer based on your own critique within 500 words. Your revision should be simple and clear, so do not add any rhetorics such as apology for the past mistake.\nRevision:"})
answer = completion_with_backoff(
model="gpt-3.5-turbo",
messages = message,#0,1,2 6,5,4
temperature=1.0,
max_tokens=512,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)['choices'][0]['message']['content']
if ("N/A" in answer):
break
iteration +=1
while answer!=delete_revision_word_from_last(answer):
answer = delete_revision_word_from_last(answer)
answer_split = answer.split('.')
if "i apologize" in answer_split[0].lower():
answer_split=answer_split[1:]
answer = ('.').join(answer_split)
print("answer:", answer)
#3 7
message.append({"role": "assistant", "content": answer})
#4 8
if iteration>2:
last_flag = True
if last_flag ==False:
message.append({"role": "user", "content": f"{context}Here is a proposed answer:\n{answer}\n\nAre there any comments or critiques for the above answer? If so, write one under 100 words. You may score the quality of the answer on the scale of 1-10 (1: no code/no sense; 10: perfect) Also, classify if revision is needed by responding \"Revision is needed\" or \"Revision is not needed\" at the end. Normally, score of less than 9 should be revised.\n\nCritique:"})
else:
message=[{"role": "user", "content": f"{context}Here is a proposed answer:\n{answer}\n\nAre there any comments or critiques for the above answer? If so, write one under 100 words. You may score the quality of the answer on the scale of 1-10 (1: no code/no sense; 10: perfect) Also, classify if revision is needed by responding \"Revision is needed\" or \"Revision is not needed\" at the end. Normally, score of less than 9 should be revised.\n\nCritique:"}]
#message=[{"role":"user","content":f"{context}Here is a proposed answer:\n{answer}\nDescribe possible feedbacks that can improve the answer. The feedback should be under 100 words. If there is nothing to improve, just say \"No Feedback\".Feedback:"}]
feedback = completion_with_backoff(
model="gpt-3.5-turbo",
messages = message,#4
temperature=1.0,
max_tokens=128,
top_p=1,
frequency_penalty=0,
presence_penalty=0
# stop=["\n\n"]
)['choices'][0]['message']['content']
feedback_sentence= feedback.split('.')
if "but" in feedback_sentence[-1].lower() and("revision is needed" in feedback_sentence[-1].lower() or "revision is not needed"in feedback_sentence[-1].lower() ):
if"revision is needed" in feedback_sentence[-1].lower() :
last_sentence = "Revision is needed"
else:
last_sentence = "Revision is not needed"
feedback_sentence[-1] = last_sentence
feedback=('.').join(feedback_sentence)
print("feedback:", feedback)
#5
message.append({"role": "assistant", "content": feedback})
outputs.append({"output":answer, "feedback":feedback})
item["iteration"]=iteration
item['outputs']=outputs
return item
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def completion_with_backoff(**kwargs):
try:
return openai.ChatCompletion.create(**kwargs)
except Exception as e:
print('-------------------------------------------------------------------------------------')
print(e)
print("kwargs", kwargs)
print("API key", openai.api_key)
print('-------------------------------------------------------------------------------------')
raise e
def delete_revision_word_from_last(answer):
line_split = answer.split('\n')
if len(line_split)>0:
original = ('\n').join(line_split[:-1])
sentence_split = tokenize.sent_tokenize(line_split[-1])
if len(sentence_split)>0:
if "revision " in sentence_split[-1]:
add_sentence = ('').join(sentence_split[:-1])
answer=original+'\n'+add_sentence
else:
answer= ('\n').join(line_split[:-1])
return answer
if __name__ == "__main__":
main()
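# Example invocation (key values, paths and request rate are illustrative only):
#   OPENAI_API_KEYS=sk-aaa,sk-bbb python call_openai_multiprocessing_sharegpt.py \
#       --input-path data/seed.json --output-path out/augmented.jsonl \
#       --fail-path out/failed.jsonl --requests-per-minute 20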
| [
"Revise the answer based on your own critique within 500 words. Your revision should be simple and clear, so do not add any rhetorics such as apology for the past mistake. Write as if the revised answer is the first try.\nRevision:",
"PLACEHOLDER",
"Revise the answer based on your own critique within 500 words. Your revision should be simple and clear, so do not add any rhetorics such as apology for the past mistake.\nRevision:",
"PLACEHOLDERHere is a proposed answer:\nPLACEHOLDER\n\nAre there any comments or critiques for the above answer? If so, write one under 100 words. You may score the quality of the answer on the scale of 1-10 (1: no code/no sense; 10: perfect) Also, classify if revision is needed by responding \"Revision is needed\" or \"Revision is not needed\" at the end. Normally, score of less than 9 should be revised.\n\nCritique:"
] |
2024-01-10 | kaistAI/SelFee | evaluation~gpt4_automatic_evaluation.py | """This code is sourced from 4960ca7 commit of https://github.com/lm-sys/FastChat/blob/main/fastchat/eval/eval_gpt_review.py"""
import argparse
import json
import os
import time
import openai
import tqdm
import ray
import shortuuid
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
MAX_API_RETRY = 5
REQ_TIME_GAP = 10
@ray.remote(num_cpus=4)
def get_eval(sys_prompt, user_prompt: str, max_tokens: int):
logging.basicConfig(level=logging.INFO)
for i in range(MAX_API_RETRY):
try:
response = openai.ChatCompletion.create(
model='gpt-4',
messages=[{
'role': 'system',
'content': sys_prompt
}, {
'role': 'user',
'content': user_prompt,
}],
temperature=0.2, # TODO: figure out which temperature is best for evaluation
max_tokens=max_tokens,
)
content = response['choices'][0]['message']['content']
logger.info(content)
return content
except Exception as e:
logger.error(e)
time.sleep(5)
logger.error(f'Failed after {MAX_API_RETRY} retries.')
return 'error'
def parse_score(review):
try:
score_pair = review.split('\n')[0]
score_pair = score_pair.replace(',', ' ')
sp = score_pair.split(' ')
if len(sp) == 2:
return [float(sp[0]), float(sp[1])]
else:
score_pair = review.split('\n')[-1]
score_pair = score_pair.split('(')[-1].split(')')[0]
print(score_pair)
sp = score_pair.split(', ')
print(sp)
if len(sp) == 2:
return [float(sp[0]), float(sp[1])]
else:
raise Exception('Invalid score pair.')
except Exception as e:
logger.error(f'{e}\nContent: {review}\n'
'You must manually fix the score pair.')
return [-1, -1]
def gen_prompt(reviewer_jsons, prompt_jsons, cat, ques, ans1, ans2):
# Default to general category (index=0)
reviewer_idx = 0
for idx, reviewer in enumerate(reviewer_jsons):
if reviewer['category'] == cat:
reviewer_idx = idx
break
prompt_id = reviewer_jsons[reviewer_idx]['prompt_id']
prompt_json = prompt_jsons[prompt_id-1]
assert prompt_json['prompt_id'] == prompt_id
sys_prompt = prompt_json['system_prompt']
prompt_template = prompt_json['prompt_template']
defaults = prompt_json['defaults']
prompt = prompt_template.format(question=ques, answer_1=ans1, answer_2=ans2, **defaults)
return sys_prompt, prompt, reviewer_idx+1
def get_json_list(file_path):
file_path = os.path.expanduser(file_path)
with open(file_path, 'r') as f:
json_list = []
for line in f:
json_list.append(json.loads(line))
return json_list
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
parser.add_argument('-q', '--question-file')
parser.add_argument('-a', '--answer-file-list', nargs='+', default=[])
parser.add_argument('-p', '--prompt-file')
parser.add_argument('-r', '--reviewer-file')
parser.add_argument('-o', '--output-review-file')
parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
args = parser.parse_args()
ray.init()
question_jsons = get_json_list(args.question_file)
answer1_jsons = get_json_list(args.answer_file_list[0])
answer2_jsons = get_json_list(args.answer_file_list[1])
reviewer_jsons = get_json_list(args.reviewer_file)
prompt_jsons = get_json_list(args.prompt_file)
# check if # of questions, answers are the same
assert len(question_jsons) == len(answer1_jsons) == len(answer2_jsons)
handles = []
review_jsons = []
total_len = len(question_jsons)
question_idx_list = list(range(total_len))
for i in question_idx_list:
assert answer1_jsons[i]['question_id'] == question_jsons[i]['question_id'] == answer2_jsons[i]['question_id']
ques = question_jsons[i]['text']
cat = question_jsons[i]['category']
ans1 = answer1_jsons[i]['text']
ans2 = answer2_jsons[i]['text']
sys_prompt, prompt, reviewer_id = gen_prompt(reviewer_jsons, prompt_jsons, cat, ques, ans1, ans2)
review_id = shortuuid.uuid()
review_jsons.append({
'review_id': review_id,
'question_id': question_jsons[i]['question_id'],
'reviewer_id': reviewer_id,
'metadata': {},
})
# To avoid the rate limit set by OpenAI
handles.append(get_eval.remote(sys_prompt, prompt, args.max_tokens))
logger.info(f'Waiting for {REQ_TIME_GAP} seconds before sending the next request.')
time.sleep(REQ_TIME_GAP)
reviews = ray.get(handles)
with open(f'{args.output_review_file}', 'w') as output_review_file:
for idx, review in enumerate(reviews):
scores = parse_score(review)
review_jsons[idx]['text'] = review
review_jsons[idx]['score'] = scores
output_review_file.write(json.dumps(review_jsons[idx]) + '\n')
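# Example invocation (file names are illustrative; since no API key is set in code,
# the openai library falls back to the OPENAI_API_KEY environment variable):
#   python gpt4_automatic_evaluation.py -q questions.jsonl \
#       -a answers_model_a.jsonl answers_model_b.jsonl \
#       -p prompts.jsonl -r reviewers.jsonl -o reviews.jsonl --max-tokens 1024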
| [
"prompt_template",
"system_prompt"
] |
2024-01-10 | kaistAI/SelFee | data_augmentation~call_openai_multiprocessing_flan.py | from concurrent.futures import ProcessPoolExecutor
import argparse
import multiprocessing
import openai
from time import sleep
from random import random
import nltk
nltk.download('punkt')
from nltk import tokenize
import json
import fcntl
from typing import List
import os
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
RetryError
) # for exponential backoff
API_KEYS = os.environ["OPENAI_API_KEYS"].split(",")
MAX_WAIT_TIME=1
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--input-path", type=str, required=True)
parser.add_argument("--output-path", type=str, required=True)
parser.add_argument("--fail-path", type=str, required=True)
parser.add_argument("--requests-per-minute", type=int, default=60, help="Number of requests per minute per API key")
parser.add_argument("--input-streaming", action="store_true")
parser.add_argument("--limit", type=int, default=None)
args = parser.parse_args()
if not args.input_streaming:
no_streaming(args.input_path, args.output_path, args.fail_path, args.requests_per_minute, args.limit)
else:
streaming(args.input_path, args.output_path, args.fail_path, args.requests_per_minute, args.limit)
def no_streaming(input_path, output_path, fail_path, requests_per_minute, limit):
input_items = []
input_format = input_path.split(".")[-1]
with open(input_path, "r") as input_file:
if input_format == "jsonl":
for line in input_file:
input_items.append(json.loads(line))
elif input_format == "json":
input_items = json.load(input_file)
else:
raise ValueError(f"Unknown input format: {input_format}")
if limit is not None:
input_items = input_items[:limit]
process(input_items, output_path, fail_path, requests_per_minute)
def streaming(input_path, output_path, fail_path, requests_per_minute, limit):
last_modified = None
last_items = None
while True:
stats = os.stat(input_path)
modified = stats.st_mtime
if last_modified is not None and last_modified == modified:
sleep(1)
continue
input_items = []
input_format = input_path.split(".")[-1]
with open(input_path, "r") as input_file:
if input_format == "jsonl":
for line in input_file:
input_items.append(json.loads(line))
elif input_format == "json":
input_items = json.load(input_file)
else:
raise ValueError(f"Unknown input format: {input_format}")
if limit is not None:
input_items = input_items[:limit]
if last_items is not None:
added_items = [item for item in input_items if item not in last_items]
else:
added_items = input_items
process(added_items, output_path, fail_path, requests_per_minute)
last_modified = modified
def process(input_items, output_path, fail_path, requests_per_minute):
num_api_keys = len(API_KEYS)
requests_per_minute_total = requests_per_minute * num_api_keys
with ProcessPoolExecutor(max_workers=num_api_keys * requests_per_minute) as executor:
for item_index, item in enumerate(input_items):
api_key = API_KEYS[item_index % num_api_keys]
executor.submit(call_api_and_save, api_key=api_key, item=item, output_path=output_path, fail_path=fail_path)
sleep(1 / requests_per_minute_total * 60)
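# Worked example of the throttling above (illustrative numbers, not from the source):
# with 3 API keys and --requests-per-minute 60, requests_per_minute_total is 180,
# so the loop sleeps 1 / 180 * 60 = 0.33 s between submissions -- roughly 180
# requests dispatched per minute across the pool, i.e. about 60 per key.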
def call_api_and_save(api_key: str, item: dict, output_path: str, fail_path: str):
try:
output_item = call_api(api_key, item)
success = True
except Exception as e:
print(e)
success = False
if success:
output_line = json.dumps(output_item)
with open(output_path, "a") as output_file:
fcntl.flock(output_file, fcntl.LOCK_EX)
output_file.write(output_line + "\n")
fcntl.flock(output_file, fcntl.LOCK_UN)
else:
fail_line = json.dumps(item)
with open(fail_path, "a") as fail_file:
fcntl.flock(fail_file, fcntl.LOCK_EX)
fail_file.write(fail_line + "\n")
fcntl.flock(fail_file, fcntl.LOCK_UN)
def call_api(api_key: str, task: dict):
openai.api_key = api_key
last_flag = False
item = {}
item["iteration_truncated"]=False
# print('Testing %s ...' % task)
# print('testing index %d-------------------------------------------------------------------------------------'%index)
instruction = task["instruction"]
gold_output = task["output"]
item["output"]=gold_output
add = task["input"]
if add!="":
instruction+="\n"
instruction+=add
item['instruction']=instruction
item['input']=""
outputs=[]
message_teacher=[]
message_student=[]
message_teacher.append({"role": "user", "content": "You are given a role as a teaching assistant. You are going to get an instruction and its corresponding answer. Your job is to make me find the answer by giving an appropriate feedback. If the proposed answer is different from the ground truth, that means there should be some revision. You should guide me by giving an instruction that helps me find the answer without directly mentioning it. You should not directly mention about the answer when giving a feedback, because that is a cheating." })
constrain = f"Here is the instruction:\n{instruction}\n"
message_teacher.append({"role": "user", "content": constrain})
message_teacher.append({"role": "user", "content": f"Here is the answer:\n{gold_output}\n"})
message_student.append({"role": "user", "content": instruction})
# Describe possible feedbacks that can improve the answer. The feedback should be under 100 words. If there is nothing to improve, just say \"No Feedback\".Feedback:"})
# source ="Instruction: " + task["instruction"] + '\Answer: ' + output + "\n\nAre there any comments or feedbacks for the above answer? If so, write one within 100 words. Also, classify if revision is needed by responding \"Revision is needed\" or \"Revision is not needed\" at the end.\n\Feedback1:"
# print(source)
# message.append({"role": "user", "content": instruction+":\n"+input})
# message.append({"role":"user","content":"Are there any comments or critiques for the above answer? If so, write one within 100 words. If not, just say \"Revision is not needed\"."})
output = completion_with_backoff(
model="gpt-3.5-turbo",
messages = message_student,
temperature=1.0,
max_tokens=512,
top_p=1,
frequency_penalty=0,
presence_penalty=0
# stop=["\n\n"]
)['choices'][0]['message']['content']
#0
while output!=delete_revision_word_from_last(output):
output = delete_revision_word_from_last(output)
# print("initial answer:", output)
message_student.append({"role": "assistant", "content": output})
message_teacher.append({"role": "user", "content": f"Here is my initial answer:\n{output}\n\nAre there any comments or critiques for the above answer? If so, write one under 100 words. You may score the quality of the answer on the scale of 1-10 (1: no code/no sense; 10: perfect) Also, classify if revision is needed by responding \"Revision is needed\" or \"Revision is not needed\" at the end. Normally, score of less than 9 should be revised.\n\nCritique:"})
# Describe possible feedbacks that can improve the answer. The feedback should be under 100 words. If there is nothing to improve, just say \"No Feedback\".Feedback:"})
# source ="Instruction: " + task["instruction"] + '\Answer: ' + output + "\n\nAre there any comments or feedbacks for the above answer? If so, write one within 100 words. Also, classify if revision is needed by responding \"Revision is needed\" or \"Revision is not needed\" at the end.\n\Feedback1:"
# print(source)
# message.append({"role": "user", "content": instruction+":\n"+input})
# message.append({"role":"user","content":"Are there any comments or critiques for the above answer? If so, write one within 100 words. If not, just say \"Revision is not needed\"."})
feedback = completion_with_backoff(
model="gpt-3.5-turbo",
messages = message_teacher,
temperature=1.0,
max_tokens=128,
top_p=1,
frequency_penalty=0,
presence_penalty=0
# stop=["\n\n"]
)
feedback = feedback['choices'][0]['message']['content']
#1: 0
message_teacher.append({"role": "assistant", "content": feedback})
message_student.append({"role": "user", "content": feedback})
outputs.append({"output":output, "feedback":feedback})
# print("feedback:", feedback)
iteration=1
while ("no critique" not in message_student[-1]["content"].lower()) and ("no revision" not in message_student[-1]["content"].lower()) and ("no need" not in message_student[-1]["content"].lower()) and ("not needed" not in message_student[-1]["content"].lower()):
if iteration>=5:
item["iteration_truncated"]=True
break
#2 6
if last_flag == False:
message_student.append({"role":"user","content":"Revise the answer based on your own critique with minimal edits. Your revision should be simple and clear, so do not add any rhetorics such as apology for the past mistake. Write as if the revised answer is the first try.\nRevision:"})
else:
message_student.append({"role":"user","content":"Revise the answer based on your own critique with minimal edits. Your revision should be simple and clear, so do not add any rhetorics such as apology for the past mistake.\nRevision:"})
answer = completion_with_backoff(
model="gpt-3.5-turbo",
messages = message_student,#0,1,2 6,5,4
temperature=1.0,
max_tokens=512,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)['choices'][0]['message']['content']
if ("N/A" not in gold_output and "N/A" in answer):
break
iteration +=1
while answer!=delete_revision_word_from_last(answer):
answer = delete_revision_word_from_last(answer)
answer_split = answer.split('.')
if "i apologize" in answer_split[0].lower():
answer_split=answer_split[1:]
answer = ('.').join(answer_split)
# print("answer:", answer)
#3 7
message_student.append({"role": "assistant", "content": answer})
message_teacher.append({"role": "user", "content": answer})
#4 8
#if iteration>2:
# last_flag = True
if last_flag ==False:
# message_teacher.append({"role": "user", "content": f"{constrain}Here is a proposed answer:\n{answer}\n\nHere is the ground truth answer:\n{gold_output}\n\nAre there any comments or critiques for the above answer? If so, write one under 100 words. Also, classify if revision is needed by responding \"Revision is needed\" or \"Revision is not needed\" at the end. If the proposed answer is different from the ground truth, that means there should be some revision.\n\nCritique:"})
message_teacher.append({"role": "user", "content": f"Here is a revised proposed answer:\n{answer}\n\nAre there any comments or critiques for the above answer? If so, write one under 100 words. You may score the quality of the answer on the scale of 1-10 (1: no code/no sense; 10: perfect) Also, classify if revision is needed by responding \"Revision is needed\" or \"Revision is not needed\" at the end. Normally, score of less than 9 should be revised.\n\nCritique:"})
else:
# message_teacher=[{"role": "user", "content": f"{constrain}Here is a proposed answer:\n{answer}\n\nHere is the ground truth answer:\n{gold_output}\n\nAre there any comments or critiques for the above answer? If so, write one under 100 words. Also, classify if revision is needed by responding \"Revision is needed\" or \"Revision is not needed\" at the end. If the proposed answer is different from the ground truth, that means there should be some revision.\n\nCritique:"}]
message_teacher=[{"role": "user", "content": f"Here is a revised proposed answer:\n{answer}\n\nAre there any comments or critiques for the above answer? If so, write one under 100 words. You may score the quality of the answer on the scale of 1-10 (1: no code/no sense; 10: perfect) Also, classify if revision is needed by responding \"Revision is needed\" or \"Revision is not needed\" at the end. Normally, score of less than 9 should be revised.\n\nCritique:"}]
#message=[{"role":"user","content":f"{constrain}Here is a proposed answer:\n{answer}\nDescribe possible feedbacks that can improve the answer. The feedback should be under 100 words. If there is nothing to improve, just say \"No Feedback\".Feedback:"}]
feedback = completion_with_backoff(
model="gpt-3.5-turbo",
messages = message_teacher,#4
temperature=1.0,
max_tokens=128,
top_p=1,
frequency_penalty=0,
presence_penalty=0
# stop=["\n\n"]
)['choices'][0]['message']['content']
feedback_sentence= feedback.split('.')
if "but" in feedback_sentence[-1].lower() and("revision is needed" in feedback_sentence[-1].lower() or "revision is not needed"in feedback_sentence[-1].lower() ):
if"revision is needed" in feedback_sentence[-1].lower() :
last_sentence = "Revision is needed"
else:
last_sentence = "Revision is not needed"
feedback_sentence[-1] = last_sentence
feedback=('.').join(feedback_sentence)
# print("feedback:", feedback)
#5
message_student.append({"role": "user", "content": feedback})
message_teacher.append({"role": "assistant", "content": feedback})
outputs.append({"output":answer, "feedback":feedback})
item["iteration"]=iteration
item['outputs']=outputs
# revised_list.append(item)
# print(message_student)
# print(message_teacher)
return item
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def completion_with_backoff(**kwargs):
try:
return openai.ChatCompletion.create(**kwargs)
except Exception as e:
print('-------------------------------------------------------------------------------------')
print(e)
# print("kwargs", kwargs)
# print("API key", openai.api_key)
print('-------------------------------------------------------------------------------------')
raise e
def delete_revision_word_from_last(answer):
line_split = answer.split('\n')
if len(line_split)>0:
original = ('\n').join(line_split[:-1])
sentence_split = tokenize.sent_tokenize(line_split[-1])
if len(sentence_split)>0:
if "revision " in sentence_split[-1]:
add_sentence = ('').join(sentence_split[:-1])
answer=original+'\n'+add_sentence
else:
answer= ('\n').join(line_split[:-1])
return answer
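# Usage note (editorial; behaviour inferred from the call sites above): the callers
# apply delete_revision_word_from_last repeatedly until the answer stops changing,
#
#   while answer != delete_revision_word_from_last(answer):
#       answer = delete_revision_word_from_last(answer)
#
# so any trailing "Revision is needed" / "Revision is not needed" sentence the model
# appends is stripped before the answer is stored.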
if __name__ == "__main__":
main()
| [
"Here is the answer:\nPLACEHOLDER\n",
"Revise the answer based on your own critique with minimal edits. Your revision should be simple and clear, so do not add any rhetorics such as apology for the past mistake. Write as if the revised answer is the first try.\nRevision:",
"Here is a revised proposed answer:\nPLACEHOLDER\n\nAre there any comments or critiques for the above answer? If so, write one under 100 words. You may score the quality of the answer on the scale of 1-10 (1: no code/no sense; 10: perfect) Also, classify if revision is needed by responding \"Revision is needed\" or \"Revision is not needed\" at the end. Normally, score of less than 9 should be revised.\n\nCritique:",
"You are given a role as a teaching assistant. You are going to get an instruction and its corresponding answer. Your job is to make me find the answer by giving an appropriate feedback. If the proposed answer is different from the ground truth, that means there should be some revision. You should guide me by giving an instruction that helps me find the answer without directly mentioning it. You should not directly mention about the answer when giving a feedback, because that is a cheating.",
"Revise the answer based on your own critique with minimal edits. Your revision should be simple and clear, so do not add any rhetorics such as apology for the past mistake.\nRevision:",
"Here is my initial answer:\nPLACEHOLDER\n\nAre there any comments or critiques for the above answer? If so, write one under 100 words. You may score the quality of the answer on the scale of 1-10 (1: no code/no sense; 10: perfect) Also, classify if revision is needed by responding \"Revision is needed\" or \"Revision is not needed\" at the end. Normally, score of less than 9 should be revised.\n\nCritique:"
] |
2024-01-10 | kaistAI/SelFee | data_augmentation~call_openai_multiprocessing_alpaca.py | from concurrent.futures import ProcessPoolExecutor
import argparse
import multiprocessing
import openai
from time import sleep
from random import random
import nltk
nltk.download('punkt')
from nltk import tokenize
import json
import fcntl
from typing import List
import os
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
RetryError
) # for exponential backoff
API_KEYS = os.environ["OPENAI_API_KEYS"].split(",")
MAX_WAIT_TIME=1
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--input-path", type=str, required=True)
parser.add_argument("--output-path", type=str, required=True)
parser.add_argument("--fail-path", type=str, required=True)
parser.add_argument("--requests-per-minute", type=int, default=60, help="Number of requests per minute per API key")
parser.add_argument("--input-streaming", action="store_true")
parser.add_argument("--limit", type=int, default=None)
args = parser.parse_args()
if not args.input_streaming:
no_streaming(args.input_path, args.output_path, args.fail_path, args.requests_per_minute, args.limit)
else:
streaming(args.input_path, args.output_path, args.fail_path, args.requests_per_minute, args.limit)
def no_streaming(input_path, output_path, fail_path, requests_per_minute, limit):
input_items = []
input_format = input_path.split(".")[-1]
with open(input_path, "r") as input_file:
if input_format == "jsonl":
for line in input_file:
input_items.append(json.loads(line))
elif input_format == "json":
input_items = json.load(input_file)
else:
raise ValueError(f"Unknown input format: {input_format}")
if limit is not None:
input_items = input_items[:limit]
process(input_items, output_path, fail_path, requests_per_minute)
def streaming(input_path, output_path, fail_path, requests_per_minute, limit):
last_modified = None
last_items = None
while True:
stats = os.stat(input_path)
modified = stats.st_mtime
if last_modified is not None and last_modified == modified:
sleep(1)
continue
input_items = []
input_format = input_path.split(".")[-1]
with open(input_path, "r") as input_file:
if input_format == "jsonl":
for line in input_file:
input_items.append(json.loads(line))
elif input_format == "json":
input_items = json.load(input_file)
else:
raise ValueError(f"Unknown input format: {input_format}")
if limit is not None:
input_items = input_items[:limit]
if last_items is not None:
added_items = [item for item in input_items if item not in last_items]
else:
added_items = input_items
process(added_items, output_path, fail_path, requests_per_minute)
last_modified = modified
def process(input_items, output_path, fail_path, requests_per_minute):
num_api_keys = len(API_KEYS)
requests_per_minute_total = requests_per_minute * num_api_keys
with ProcessPoolExecutor(max_workers=num_api_keys * requests_per_minute) as executor:
for item_index, item in enumerate(input_items):
api_key = API_KEYS[item_index % num_api_keys]
executor.submit(call_api_and_save, api_key=api_key, item=item, output_path=output_path, fail_path=fail_path)
sleep(1 / requests_per_minute_total * 60)
def call_api_and_save(api_key: str, item: dict, output_path: str, fail_path: str):
try:
output_item = call_api(api_key, item)
success = True
except Exception as e:
success = False
if success:
output_line = json.dumps(output_item)
with open(output_path, "a") as output_file:
fcntl.flock(output_file, fcntl.LOCK_EX)
output_file.write(output_line + "\n")
fcntl.flock(output_file, fcntl.LOCK_UN)
else:
fail_line = json.dumps(item)
with open(fail_path, "a") as fail_file:
fcntl.flock(fail_file, fcntl.LOCK_EX)
fail_file.write(fail_line + "\n")
fcntl.flock(fail_file, fcntl.LOCK_UN)
def call_api(api_key: str, task: dict):
openai.api_key = api_key
last_flag = False
item = {}
item["iteration_truncated"]=False
print('Testing %s ...' % task)
# print('testing index %d-------------------------------------------------------------------------------------'%index)
instruction = task["instruction"]
input = task["input"]
item['instruction']=instruction
item['input']=input
outputs=[]
message=[]
if input == "":
context = f"{instruction}\n"
else:
context = f"{instruction}\n{input}\n"
#0
message.append({"role": "user", "content": f"{context}"})
answer = completion_with_backoff(
model="gpt-3.5-turbo",
messages = message,#0,1,2 6,5,4
temperature=1.0,
max_tokens=512,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)['choices'][0]['message']['content']
print("answer:", answer)
#3 7
# message.append({"role": "assistant", "content": answer})
message=[{"role": "user", "content": f"{context}Here is a proposed answer:\n{answer}\n\nAre there any comments or critiques for the above answer? If so, write one under 100 words. You may score the quality of the answer on the scale of 1-10 (1: no code/no sense; 10: perfect) Also, classify if revision is needed by responding \"Revision is needed\" or \"Revision is not needed\" at the end. Normally, score of less than 9 should be revised.\n\nCritique:"}]
# Describe possible feedbacks that can improve the answer. The feedback should be under 100 words. If there is nothing to improve, just say \"No Feedback\".Feedback:"})
# source ="Instruction: " + task["instruction"] + '\Answer: ' + output + "\n\nAre there any comments or feedbacks for the above answer? If so, write one within 100 words. Also, classify if revision is needed by responding \"Revision is needed\" or \"Revision is not needed\" at the end.\n\Feedback1:"
# print(source)
# message.append({"role": "user", "content": instruction+":\n"+input})
# message.append({"role":"user","content":"Are there any comments or critiques for the above answer? If so, write one within 100 words. If not, just say \"Revision is not needed\"."})
feedback = completion_with_backoff(
model="gpt-3.5-turbo",
messages = message,
temperature=1.0,
max_tokens=128,
top_p=1,
frequency_penalty=0,
presence_penalty=0
# stop=["\n\n"]
)
feedback = feedback['choices'][0]['message']['content']
#1: 0
message.append({"role": "assistant", "content": feedback})
outputs.append({"output":answer, "feedback":feedback})
print("feedback:", feedback)
iteration=1
while("no critique" not in message[-1]["content"].lower()) and ("no revision" not in message[-1]["content"].lower()) and ("no need" not in message[-1]["content"].lower()) and ("not needed" not in message[-1]["content"].lower()):
if iteration>=5:
item["iteration_truncated"]=True
break
#2 6
if last_flag == False:
message.append({"role":"user","content":"Revise the answer based on your own critique within 500 words. Your revision should be simple and clear, so do not add any rhetorics such as apology for the past mistake. Write as if the revised answer is the first try.\nRevision:"})
else:
message.append({"role":"user","content":"Revise the answer based on your own critique within 500 words. Your revision should be simple and clear, so do not add any rhetorics such as apology for the past mistake.\nRevision:"})
answer = completion_with_backoff(
model="gpt-3.5-turbo",
messages = message,#0,1,2 6,5,4
temperature=1.0,
max_tokens=512,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)['choices'][0]['message']['content']
if ("N/A" in answer):
break
iteration +=1
while answer!=delete_revision_word_from_last(answer):
answer = delete_revision_word_from_last(answer)
answer_split = answer.split('.')
if "i apologize" in answer_split[0].lower():
answer_split=answer_split[1:]
answer = ('.').join(answer_split)
print("answer:", answer)
#3 7
message.append({"role": "assistant", "content": answer})
#4 8
if iteration>2:
last_flag = True
if last_flag ==False:
message.append({"role": "user", "content": f"{context}Here is a proposed answer:\n{answer}\n\nAre there any comments or critiques for the above answer? If so, write one under 100 words. You may score the quality of the answer on the scale of 1-10 (1: no code/no sense; 10: perfect) Also, classify if revision is needed by responding \"Revision is needed\" or \"Revision is not needed\" at the end. Normally, score of less than 9 should be revised.\n\nCritique:"})
else:
message=[{"role": "user", "content": f"{context}Here is a proposed answer:\n{answer}\n\nAre there any comments or critiques for the above answer? If so, write one under 100 words. You may score the quality of the answer on the scale of 1-10 (1: no code/no sense; 10: perfect) Also, classify if revision is needed by responding \"Revision is needed\" or \"Revision is not needed\" at the end. Normally, score of less than 9 should be revised.\n\nCritique:"}]
#message=[{"role":"user","content":f"{context}Here is a proposed answer:\n{answer}\nDescribe possible feedbacks that can improve the answer. The feedback should be under 100 words. If there is nothing to improve, just say \"No Feedback\".Feedback:"}]
feedback = completion_with_backoff(
model="gpt-3.5-turbo",
messages = message,#4
temperature=1.0,
max_tokens=128,
top_p=1,
frequency_penalty=0,
presence_penalty=0
# stop=["\n\n"]
)['choices'][0]['message']['content']
feedback_sentence= feedback.split('.')
if "but" in feedback_sentence[-1].lower() and("revision is needed" in feedback_sentence[-1].lower() or "revision is not needed"in feedback_sentence[-1].lower() ):
if"revision is needed" in feedback_sentence[-1].lower() :
last_sentence = "Revision is needed"
else:
last_sentence = "Revision is not needed"
feedback_sentence[-1] = last_sentence
feedback=('.').join(feedback_sentence)
print("feedback:", feedback)
#5
message.append({"role": "assistant", "content": feedback})
outputs.append({"output":answer, "feedback":feedback})
item["iteration"]=iteration
item['outputs']=outputs
return item
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def completion_with_backoff(**kwargs):
try:
return openai.ChatCompletion.create(**kwargs)
except Exception as e:
print('-------------------------------------------------------------------------------------')
print(e)
print("kwargs", kwargs)
print("API key", openai.api_key)
print('-------------------------------------------------------------------------------------')
raise e
def delete_revision_word_from_last(answer):
line_split = answer.split('\n')
if len(line_split)>0:
original = ('\n').join(line_split[:-1])
sentence_split = tokenize.sent_tokenize(line_split[-1])
if len(sentence_split)>0:
if "revision " in sentence_split[-1]:
add_sentence = ('').join(sentence_split[:-1])
answer=original+'\n'+add_sentence
else:
answer= ('\n').join(line_split[:-1])
return answer
if __name__ == "__main__":
main()
| [
"Revise the answer based on your own critique within 500 words. Your revision should be simple and clear, so do not add any rhetorics such as apology for the past mistake. Write as if the revised answer is the first try.\nRevision:",
"PLACEHOLDER",
"PLACEHOLDERHere is a proposed answer:\nPLACEHOLDER\n\nAre there any comments or critiques for the above answer? If so, write one under 100 words. You may score the quality of the answer on the scale of 1-10 (1: no code/no sense; 10: perfect) Also, classify if revision is needed by responding \"Revision is needed\" or \"Revision is not needed\" at the end. Normally, score of less than 9 should be revised.\n\nCritique:",
"Revise the answer based on your own critique within 500 words. Your revision should be simple and clear, so do not add any rhetorics such as apology for the past mistake.\nRevision:"
] |
2024-01-10 | Neelesh99/KnowledgeSpaces | LLMServer~construct_index.py | import os
from llama_index import VectorStoreIndex, LLMPredictor, PromptHelper, Document, \
StringIterableReader, SlackReader, LangchainEmbedding, ServiceContext
from langchain import HuggingFaceHub, HuggingFacePipeline
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms.base import LLM
# ModelConfig contains the configuration for the application
class ModelConfig:
def __init__(self, max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit, temperature, model_name,
local):
self.max_input_size = max_input_size
self.num_outputs = num_outputs
self.max_chunk_overlap = max_chunk_overlap
self.chunk_size_limit = chunk_size_limit
self.temperature = temperature
self.model_name = model_name
self.local = local
def __eq__(self, o: object) -> bool:
if isinstance(o, ModelConfig):
return self.chunk_size_limit == o.chunk_size_limit and self.max_chunk_overlap == o.max_chunk_overlap and self.num_outputs == o.num_outputs and self.max_input_size == o.max_input_size and self.temperature == o.temperature and self.model_name == o.model_name and self.local == o.local
return False
# get_model_config_from_env creates a ModelConfig with defaulting from the environment variables
def get_model_config_from_env() -> ModelConfig:
max_input_size_str = os.getenv("MAX_INPUT_SIZE") if "MAX_INPUT_SIZE" in os.environ else "2048"
num_outputs_str = os.getenv("NUM_OUTPUTS") if "NUM_OUTPUTS" in os.environ else "5096"
max_chunk_overlap_str = os.getenv("MAX_CHUNK_OVERLAP") if "MAX_CHUNK_OVERLAP" in os.environ else "28"
chunk_size_limit_str = os.getenv("CHUNK_SIZE_LIMIT") if "CHUNK_SIZE_LIMIT" in os.environ else "600"
temperature_str = os.getenv("TEMPERATURE") if "TEMPERATURE" in os.environ else "0.6"
local_str = os.getenv("LOCAL") if "LOCAL" in os.environ else "True"
model_name = os.getenv("MODEL_NAME") if "MODEL_NAME" in os.environ else (
"gpt-3.5-turbo" if local_str == "False" else "declare-lab/flan-alpaca-base")
return ModelConfig(int(max_input_size_str), int(num_outputs_str), int(max_chunk_overlap_str),
int(chunk_size_limit_str), float(temperature_str), model_name, local_str == "True")
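# Illustrative sketch (the environment values below are hypothetical): with
#   export LOCAL=False
#   export MODEL_NAME=gpt-3.5-turbo
#   export TEMPERATURE=0.2
# get_model_config_from_env() returns a ModelConfig with max_input_size=2048,
# num_outputs=5096, max_chunk_overlap=28, chunk_size_limit=600, temperature=0.2,
# model_name="gpt-3.5-turbo" and local=False; any variable that is not set simply
# falls back to the defaults shown above.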
# get_prompt_helper creates an OpenAI PromptHelper instance
def get_prompt_helper(model_restrictions: ModelConfig) -> PromptHelper:
return PromptHelper(model_restrictions.max_input_size, model_restrictions.num_outputs,
float(model_restrictions.max_chunk_overlap / model_restrictions.chunk_size_limit), chunk_size_limit=model_restrictions.chunk_size_limit)
# get_vector_index creates a GPTSimpleVectorIndex from GPTIndex Documents of any form, requires LLM and ModelConfig to be specified, also allows non OpenAI Embeddings
def get_vector_index(documents: list[Document], llm: LLM, model_config: ModelConfig,
embeddings=None) -> VectorStoreIndex:
predictor = LLMPredictor(llm=llm)
prompt_helper = get_prompt_helper(model_config)
if embeddings is None:
service_context = ServiceContext.from_defaults(llm_predictor=predictor, prompt_helper=prompt_helper)
return VectorStoreIndex(documents, service_context=service_context)
else:
service_context = ServiceContext.from_defaults(llm_predictor=predictor, prompt_helper=prompt_helper, embed_model=embeddings)
return VectorStoreIndex(documents, service_context=service_context)
# get_openai_api_llm constructs an OpenAI API powered LLM model, requires OPENAI_API_TOKEN to be in environment
# variables
def get_openai_api_llm(model_config):
return ChatOpenAI(temperature=model_config.temperature, model_name=model_config.model_name,
max_tokens=model_config.num_outputs)
# get_local_llm_from_huggingface downloads and constructs an LLM model based on name from the HuggingFace repository
def get_local_llm_from_huggingface(model_config):
return HuggingFacePipeline.from_model_id(
model_id=model_config.model_name, task="text2text-generation",
model_kwargs={
"temperature": model_config.temperature,
# "model_max_length": model_config.num_outputs,
"max_length": model_config.num_outputs}
)
# IndexMaker provides utility wrappers for getting indexes from either slack or plain text list using either OpenAI
# API models or a local one
class IndexMaker:
@staticmethod
def get_index_from_text(list_of_text: list[str]):
documents = StringIterableReader().load_data(list_of_text)
model_config = get_model_config_from_env()
return get_vector_index(documents, get_openai_api_llm(model_config), model_config)
@staticmethod
def get_index_from_slack(channel_ids: list[str]):
documents = SlackReader().load_data(channel_ids)
model_config = get_model_config_from_env()
return get_vector_index(documents, get_openai_api_llm(model_config), model_config)
@staticmethod
def get_hf_index_from_text(list_of_text: list[str]):
documents = StringIterableReader().load_data(list_of_text)
model_config = get_model_config_from_env()
hf = IndexMaker.get_hf_embeddings()
return get_vector_index(documents, get_local_llm_from_huggingface(model_config), model_config, hf)
@staticmethod
def get_hf_index_from_docs(documents: list[Document]):
model_config = get_model_config_from_env()
hf = IndexMaker.get_hf_embeddings()
return get_vector_index(documents, get_local_llm_from_huggingface(model_config), model_config, hf)
@staticmethod
def get_hf_index_from_slack(channel_ids: list[str]):
documents = SlackReader().load_data(channel_ids)
model_config = get_model_config_from_env()
hf = IndexMaker.get_hf_embeddings()
return get_vector_index(documents, get_local_llm_from_huggingface(model_config), model_config, hf)
@staticmethod
def get_hf_embeddings():
model_name = "sentence-transformers/all-mpnet-base-v2"
model_kwargs = {'device': 'cpu'}
hf = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)
return LangchainEmbedding(hf)
@staticmethod
def get_hf_llm_predictor():
model_config = get_model_config_from_env()
model = get_local_llm_from_huggingface(model_config)
return LLMPredictor(llm=model)
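# Example usage (illustrative sketch, not part of the original module; assumes the
# relevant OpenAI key or HuggingFace model download is available):
#
#   index = IndexMaker.get_hf_index_from_text(["Jollof rice is a West African dish."])
#   query_engine = index.as_query_engine()
#   print(query_engine.query("What is jollof rice?"))
#
# get_index_from_text behaves the same way but routes completions through the
# OpenAI API instead of a local HuggingFace pipeline.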
| [] |
2024-01-10 | MikeRock51/african_cuisines_recipe_api | models~chat~chatProvider.py | #!/usr/bin/env python3
from openai import OpenAI
import os
from dotenv import load_dotenv
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
raise ValueError("OPENAI key missing")
client = OpenAI(api_key=api_key)
def getChatResponse(chatHistory):
try:
completion = client.chat.completions.create(
model="gpt-3.5-turbo", messages=chatHistory, max_tokens=200)
completionText = completion.choices[0].message.content
return {"role": "assistant", "content": completionText}
except Exception as e:
raise e
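# Example usage (illustrative sketch, not part of the original module):
#
#   history = [
#       {"role": "system", "content": "You are a helpful nutrition assistant."},
#       {"role": "user", "content": "Suggest a high-protein Nigerian breakfast."},
#   ]
#   reply = getChatResponse(history)
#   # reply -> {"role": "assistant", "content": "..."}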
| [] |
2024-01-10 | MikeRock51/african_cuisines_recipe_api | chatbot~yishu_cli.py | #!/usr/bin/env python3
from openai import OpenAI
from termcolor import colored
import os
# Load environment variables from a .env file
from dotenv import load_dotenv
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
raise ValueError("OPENAI key missing")
client = OpenAI(api_key=api_key)
def main():
print(colored("Welcome! My name is Yishu. Your AI assistant for all things nutrition. How may I be of help today?", "green", attrs=['bold']))
system_message = "Your name is Yishu. You are a food and nutrition specialist bot. You provide expert assistance on all matters related to food, nutrition and health"
chat_history = [{"role": "system", "content": system_message}]
while True:
user_input = input(colored("You: ", "yellow"))
try:
messages = chat_history
messages.append({"role": "user", "content": user_input})
completion = client.chat.completions.create(model="gpt-3.5-turbo",
messages=messages,
max_tokens=150)
completion_text = completion.choices[0].message.content
if user_input.lower() == "exit":
print(colored("Yishu: " + completion_text, "green"))
return
print(colored("Yishu: " + completion_text, "green"))
chat_history.append({"role": "user", "content": user_input})
chat_history.append({"role": "assistant", "content": completion_text})
except Exception as e:
print(colored(str(e), "red"))
if __name__ == "__main__":
main()
| [
"Your name is Yishu. You are a food and nutrition specialist bot. You provide expert assistance on all matters related to food, nutrition and health"
] |
2024-01-10 | goldenNormal/meeting-summary | utils_llm_models.py | from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.schema import (
HumanMessage,
AIMessage,
SystemMessage
)
import time
import os
OPENAI_API_KEY,API_BASE = os.getenv('OPENAI_API_KEY'),os.getenv('API_BASE')
from langchain.chat_models import ChatOpenAI
from jinja2 import Template
gpt_35 = ChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler()],temperature = 0.0 , openai_api_key = OPENAI_API_KEY, openai_api_base = API_BASE,
model_name='gpt-3.5-turbo-16k-0613' )
gpt4 = ChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler()],temperature = 0.0 , openai_api_key = OPENAI_API_KEY, openai_api_base = API_BASE,
model_name='gpt-4-0314' )
def new_gpt35(temperature=0.0,**kwargs):
if temperature == 0.0 and len(kwargs) == 0:
return gpt_35
llm = ChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler()],temperature = temperature , openai_api_key = OPENAI_API_KEY, openai_api_base = API_BASE,
model_name='gpt-3.5-turbo-16k-0613' ,**kwargs)
return llm
def new_gpt4(temperature=0.0,**kwargs):
if temperature == 0.0 and len(kwargs) == 0:
return gpt4
llm = ChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler()],temperature = temperature , openai_api_key = OPENAI_API_KEY, openai_api_base = API_BASE,
model_name='gpt-4-0314' ,**kwargs)
return llm
def get_token_cnt(text):
import tiktoken
enc = tiktoken.get_encoding("cl100k_base")
enctext =enc.encode(text)
return len(enctext)
def jinja_format(message, **kwargs):
return Template(message).render(**kwargs)
class BaseLLM:
def __init__(self, llm) -> None:
self.llm = llm
        self.max_retries = 5  # maximum number of retries
        self.retry_delay = 1  # delay between retries, in seconds
self.msgs = []
def add_human(self,msg):
self.msgs.append(HumanMessage(content=msg))
def add_system(self,msg):
self.msgs.append(SystemMessage(content=msg))
def clear(self):
self.msgs = []
def add_AI(self,resp):
self.msgs.append(AIMessage(content=resp))
def get_msgs(self):
return self.msgs
def set_msgs(self,msgs):
self.msgs = msgs
class LLM(BaseLLM):
def __init__(self, llm) -> None:
super().__init__(llm)
def get_reply(self):
AImsg = self.llm(self.msgs)
resp = AImsg.content
return resp
def get_time_cost_reply(self):
start = time.time()
resp = self.get_reply()
end = time.time()
print(f'cost time: {end-start}')
return resp
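# Example usage (illustrative sketch; the prompt text is made up):
#
#   llm = LLM(new_gpt35())
#   llm.add_system("You summarise meeting transcripts.")
#   llm.add_human("Summarise: Alice proposed moving the launch to June.")
#   summary = llm.get_time_cost_reply()
#   llm.add_AI(summary)  # keep the reply in the running message history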
| [] |
2024-01-10 | jhmatthews/alpro | alpro~models.py | import numpy as np
from scipy.interpolate import interp1d
import types
import os
from scipy.integrate import simps
class units:
'''
class containing some units. Should probably use astropy units
but I find them a bit annoying.
'''
def __init__(self):
self.kpc = 3.0857e21
self.pc = 3.0857e18
self.c = 2.997925e10
self.yr = 3.1556925e7
self.myr = 3.1556925e13
self.kyr = 3.1556925e10
self.radian = 57.29577951308232
self.msol = 1.989e33
self.mprot = 1.672661e-24
self.melec = 9.10938356e-28
self.melec_csq = self.melec * self.c * self.c
self.mprot_csq = self.mprot * self.c * self.c
self.e = 4.8032045057134676e-10 # fundamental charge
self.kev = 1.602192e-9 # kilo electron volts in CGS
self.ev = 1.602192e-12 # electron volts in CGS
self.kb = 1.38062e-16 # boltzmann
self.h = 6.6262e-27 # plank
        self.hbar = self.h / (2.0 * np.pi)  # reduced Planck constant, h / (2 pi)
self.hbar_ev = 6.582119569e-16
self.g = 6.670e-8 # gravitational
self.hbar_c = self.hbar * self.c
self.alpha = self.e * self.e / self.hbar_c
self.thomson = 0.66524e-24
self.unit_gauss_natural = 0.01953548032
self.unit_length_natural = 50676.79373667135
# class to use for units
unit = units()
def random_angle(size=None):
'''
compute theta and phi coordinates for
a random isotropic angle
'''
costheta = (2.0 * np.random.random(size=size)) - 1.0
phi = 2.0 * np.pi * np.random.random(size=size)
theta = np.arccos(costheta)
return (theta, phi)
def fterm(r, C, alpha):
term1 = -alpha * np.cos(alpha * r)
term2 = np.sin(alpha * r) / r
F0 = C * alpha * alpha * (alpha * np.cos(alpha) - np.sin(alpha))
term3 = F0 * r * r / alpha / alpha
f = C * (term1 + term2) + term3
return f
def fprime(r, C, alpha):
term1 = alpha * alpha * np.sin(alpha * r)
term2 = (alpha * np.cos(alpha * r) / r) - (np.sin(alpha * r) / r / r)
F0 = C * alpha * alpha * (alpha * np.cos(alpha) - np.sin(alpha))
term3 = 2.0 * F0 * r / alpha / alpha
f = C * (term1 + term2) + term3
return f
def libanov_Br(r, alpha=5.76, theta=np.pi / 4.0, C=6e-8):
f = fterm(r, C, alpha)
Br = 2.0 * np.cos(theta) * f / r / r
return (-Br)
def get_libanov_B(r, theta=np.pi / 4, Rcavity=93.0, alpha=5.76, C=6e-8):
rnorm = r / Rcavity
fr = fterm(rnorm, C, alpha)
Br = 2.0 * np.cos(theta) * fr / rnorm / rnorm
Btheta = -np.sin(theta) * fprime(rnorm, C, alpha) / rnorm
Bphi = alpha * np.sin(theta) * fr / rnorm
# truncate the field beyond Rcavity
Btheta[rnorm > 1] = 0.0
Bphi[rnorm > 1] = 0.0
Br[rnorm > 1] = 0.0
return (Btheta, Bphi, Br)
def get_libanov_B_old(r, include_radial=True):
x = r / (93.0)
Bx = (0.00312443 * (x**18)) - (0.0319991 * (x**16)) + (0.260311 * (x**14)) - (1.63197 * (x**12)) + \
(7.58002 * (x**10)) - (24.721 * (x**8)) + (52.3929 * (x**6)) - \
(63.8794 * (x**4)) + (35.8973 * (x**2)) - 5.86899
By = (0.0102459 * (x**17)) - (0.0937683 * (x**15)) + (0.671841 * (x**13)) - (3.6406 * (x**11)) + \
(14.2479 * (x**9)) - (37.7455 * (x**7)) + \
(61.3611 * (x**5)) - (51.7231 * (x**3)) + (16.9128 * x)
if include_radial:
Bz = libanov_Br(x)
return 1e-6 * Bx, 1e-6 * By, Bz
else:
return 1e-6 * Bx, 1e-6 * By
def churazov_density(r):
'''
Density function from churazov et al 2003
Given as equation (2) of Reynolds et al. 2020
Parameters:
r float
distance from cluster centre in kpc
'''
term1 = 3.9e-2 / ((1.0 + (r / 80.0)**2)**1.8)
term2 = 4.05e-3 / ((1.0 + (r / 280.0)**2)**0.87)
return (term1 + term2)
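# Rough numbers for orientation (computed from the formula above): the profile gives
# n_e ~ 4.3e-2 cm^-3 at the cluster centre (r = 0) and roughly 1e-2 cm^-3 at
# r = 100 kpc, so the electron density falls by about a factor of four over the
# inner 100 kpc.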
class ClusterProfile:
'''
container for a magnetic field and density profile for a cluster
'''
def __init__(self, model="a", plasma_beta=100, B_rms=None, n=None):
self.plasma_beta = plasma_beta
self.model = model
if model == "a":
# Model A from Reynolds+ 2020
self.get_B = self.B_modA
self.density = churazov_density
self.n0 = self.density(0.0)
self.B0 = 2.5e-5
self.B_exponent = 0.7
elif model == "b":
# Model B from Reynolds+ 2020
self.get_B = self.B_modB
self.density = churazov_density
self.n25 = self.density(25.0)
elif model == "flat":
'''
allow to just have a uniform field
'''
self.B_rms = B_rms
self.n = n
self.get_B = self.Bflat
self.density = churazov_density
elif model == "murgia":
self.n0 = 1e-3
self.r0 = 400.0
self.B_exponent = 0.5
self.beta = 0.6
self.B0 = 1e-6
self.density = self.beta_density
self.get_B = self.B_modA
elif model == "russell":
self.n0 = 2.63
self.pl_alpha = 1.16
self.P0 = 1.85e-9
self.r_bend = 511.0
self.a_low = 0.47
self.a_high = 2.54
self.density = self.pl_density
self.get_B = self.BendingPL_B
elif model == "custom":
print("Warning: Custom model specified - make sure get_B & density methods are populated or domain set manually!")
else:
raise ValueError(
"ClusterProfile did not understand model type {}".format(model))
def beta_r(self, r):
if callable(self.plasma_beta):
return (self.plasma_beta(r))
else:
return (self.plasma_beta)
def churazov_density(self, r):
return (churazov_density(r))
def beta_density(self, r):
'''
beta law density.
'''
exponent = -3.0 * self.beta / 2.0
n = self.n0 * (1 + (r / self.r0)**2) ** exponent
return (n)
def pl_density(self, r):
'''
power law density.
'''
return (self.n0 * (r ** -self.pl_alpha))
def BendingPL_B(self, r):
'''
bending power law density.
'''
numer = r**-self.a_low
denom = 1 + (r / self.r_bend)**(self.a_high - self.a_low)
P = self.P0 * (numer / denom)
B = np.sqrt(P * 4.0 * np.pi / self.beta_r(r))
return B
def Bflat(self):
'''
        uniform magnetic field
'''
return (self.B_rms)
def nflat(self):
'''
uniform density
'''
return (self.n)
def B_modA(self, r):
'''
Model A from Reynolds et al. 2020
Parameters:
r float
distance from cluster centre in kpc
'''
beta = self.beta_r(r)
return (self.B0 * (self.density(r) / self.n0)
** self.B_exponent * 100.0 / beta)
def B_modB(self, r, B25=7.5e-6):
'''
Model B from Reynolds et al. 2020
Parameters:
r float
distance from cluster centre in kpc
'''
beta = self.beta_r(r)
B = B25 * np.sqrt(self.density(r) / self.n25 * 100.0 / beta)
return (B)
def profile(self, r):
'''
        wrapper to the density and magnetic field functions
'''
return (self.density(r), self.get_B(r))
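# Example usage (illustrative sketch; the radius grid is arbitrary):
#
#   cluster = ClusterProfile(model="a", plasma_beta=100)
#   r = np.linspace(1.0, 500.0, 50)      # kpc
#   ne, B = cluster.profile(r)           # cm^-3 and Gauss
#
# Model "a" ties the field to the Churazov density via B0 * (n/n0)**0.7 * (100/beta),
# so passing a different plasma_beta (or a callable beta(r)) rescales the field.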
class ClusterFromFile:
def __init__(self, fname="Bfield.npy", model_type="cube"):
# load the array
self.Bfull = np.load(fname)
self.N = self.Bfull.shape[0]
self.mid = self.N // 2
self.density = churazov_density
if model_type == "cube":
if any(i != self.N for i in self.Bfull.shape[:-1]):
raise ValueError(
"File supplied must be cube shaped but has shape {}".format(
self.Bfull.shape))
elif model_type == "1d":
self.z = self.Bfull[0, :]
self.B = np.transpose(self.Bfull[1:, :])
interp_x_temp = interp1d(self.z, self.B[:, 0], kind='slinear')
interp_y_temp = interp1d(self.z, self.B[:, 1], kind='slinear')
interp_z_temp = interp1d(self.z, self.B[:, 2], kind='slinear')
# actual interpolation always done using 2nd order interp
kind = "quadratic"
# kind='quadratic'
self.interp_x = interp1d(self.z, self.B[:, 0], kind=kind)
self.interp_y = interp1d(self.z, self.B[:, 1], kind=kind)
self.interp_z = interp1d(self.z, self.B[:, 2], kind=kind)
def slice(self, z, L=100.0, axis=0, sign=1, degrade=1, normalise=1.0):
if axis == 0:
self.B = self.Bfull[:, self.mid, self.mid, :]
elif axis == 1:
self.B = self.Bfull[self.mid, :, self.mid, :]
elif axis == 2:
self.B = self.Bfull[self.mid, self.mid, :, :]
if sign > 0:
self.B = self.B[self.mid:, :]
else:
self.B = self.B[:self.mid, :]
self.B *= normalise
# take a slice along the B field
from scipy.interpolate import interp1d
ztrue = z
        self.z = np.linspace(0, L, len(self.B[:, 0]) // degrade)  # integer sample count required by linspace
if degrade > 1:
# these functions will allow us to degrade the resolution using
# linear spline interp
interp_x_temp = interp1d(ztrue, self.B[:, 0], kind='slinear')
interp_y_temp = interp1d(ztrue, self.B[:, 1], kind='slinear')
interp_z_temp = interp1d(ztrue, self.B[:, 2], kind='slinear')
self.B = np.zeros((len(self.z), 3))
self.B[:, 0] = interp_x_temp(self.z)
self.B[:, 1] = interp_y_temp(self.z)
self.B[:, 2] = interp_z_temp(self.z)
elif degrade < 1:
raise ValueError("degrade needs to be >= 1!")
# actual interpolation always done using 2nd order interp
kind = "quadratic"
# kind='quadratic'
self.interp_x = interp1d(self.z, self.B[:, 0], kind=kind)
self.interp_y = interp1d(self.z, self.B[:, 1], kind=kind)
self.interp_z = interp1d(self.z, self.B[:, 2], kind=kind)
def get_Bz(self, z):
Bz = self.interp_z(z)
return (Bz)
def get_B(self, z):
'''
get the two perpendicular components of the magnetic field at
distance z
'''
Bx = self.interp_x(z)
By = self.interp_y(z)
return (Bx, By)
def profile(self, r):
return (self.density(r), self.get_B(r))
def omega_p(ne):
'''
calculate the plasma frequency in natural (eV) units from an electron density
Parameters:
ne float/array-like
electron density in cm^-3
'''
omega_p = np.sqrt(4.0 * np.pi * unit.e * unit.e *
ne / unit.melec) * unit.hbar_ev
return (omega_p)
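# Worked example (illustrative): for a typical intracluster density of
# n_e = 1e-2 cm^-3 the expression above gives omega_p ~ 5.6e3 rad/s, which the
# hbar_ev factor converts to an energy of roughly 3.7e-12 eV -- the scale against
# which an axion-like-particle mass is compared in the resonance handling below.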
# Possibly this should be renamed to Domain, or similar
class FieldModel:
def __init__(self, profile, plasma_beta=100, coherence_r0=None):
self.profile = profile
self.beta = plasma_beta
# coherence_r0 scales the coherence lengths with radius
# by a factor of (1 + r/coherence_r0), in kpc
self.coherence_r0 = coherence_r0
self.Bz = 1.0
def create_libanov_field(self, deltaL=1.0, Lmax=93.0,
density=None, theta=np.pi / 4.0):
'''
Initialise uniform field model of Libanov & Troitsky.
Parameters:
deltaL float
resolution of domain in kpc
Lmax float
maximum radius in kpc
density str / Nonetype
if None, use vanishing density. if set
'''
self.r = np.arange(0, Lmax, deltaL)
self.deltaL = np.ones_like(self.r) * deltaL
self.rcen = self.r + (0.5 * self.deltaL)
self.Bx, self.By, self.Bz = get_libanov_B(self.rcen, theta=theta)
self.B = np.sqrt(self.Bx**2 + self.By**2)
self.phi = np.arctan2(self.Bx, self.By)
if density is None:
            self.ne = 1e-20 * np.ones_like(self.rcen)  # vanishing density
elif density == "churazov":
self.ne = churazov_density(self.rcen)
else:
raise ValueError("density keyword must be Nonetype or churazov")
#self.rm = self.get_rm()
self.omega_p = omega_p(self.ne)
def uniform_field_z(self, deltaL=1.0, Lmax=1800.0):
'''
Set up a uniform radial field model with a uniform field sampled at N points.
Parameters:
deltaL float
size of boxes in kpc
Lmax float
size of domain in kpc
'''
self.r = np.arange(0, Lmax - deltaL, deltaL)
self.deltaL = np.ones_like(self.r) * deltaL
self.rcen = self.r + (0.5 * self.deltaL)
self.Bx, self.By = 0.0, 0.0
self.ne, self.Bz = self.profile(self.rcen)
self.B = np.sqrt(self.Bx**2 + self.By**2)
self.phi = np.zeros_like(self.Bx)
self.omega_p = omega_p(self.ne)
def single_box(self, phi, B, L, ne, N=1):
'''
Set up a Field model with a uniform field sampled at N points.
Parameters:
phi float
angle between perpendicular magnetic field and y axis
B float
magnetic field strength in Gauss
L float
size of domain in kpc
ne float
electron density in cm^-3
'''
self.r = np.linspace(0, L, N)
self.deltaL = np.ones_like(self.r) * (L - self.r[-1])
self.rcen = self.r + (0.5 * self.deltaL)
self.ne = np.ones_like(self.r) * ne
self.phi = np.ones_like(self.r) * phi
self.B = np.ones_like(self.r) * B
self.Bx = self.B * np.sin(self.phi)
self.By = self.B * np.cos(self.phi)
self.Bz = np.zeros_like(self.phi)
def get_rm(self, cell_centered=True):
r'''
Calculate the rotation measure of the field model using Simpson integration.
Equation is :math:`RM= 812 \int n_e B_z dz` with the field in microGauss
Returns:
the rotation measure of the field model in rad m^-2
'''
#prefactor = (unit.e ** 3) / 2.0 / np.pi / unit.melec_csq / unit.melec_csq
prefactor = 812.0
if cell_centered:
r = self.rcen
else:
r = self.r
integral = simps(self.ne * self.Bz * 1e6, r)
return (prefactor * integral)
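    # Worked example for get_rm (illustrative numbers): a single 10 kpc cell with
    # n_e = 1e-2 cm^-3 and B_z = 1 microGauss contributes roughly
    # 812 * 1e-2 * 1 * 10 ~ 81 rad m^-2, consistent with the equation in the
    # docstring (field in microGauss, lengths in kpc).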
def domain_from_slice(self, Cluster, deltaL=1.0, Lmax=500.0, r0=0.0):
npoints = int((Lmax - r0) // deltaL)
self.r = np.linspace(r0, Lmax - deltaL, npoints)
self.deltaL = np.ones_like(self.r) * deltaL
self.rcen = self.r + (0.5 * self.deltaL)
self.Bx, self.By = Cluster.get_B(self.rcen)
self.Bz = Cluster.get_Bz(self.rcen)
self.B = np.sqrt(self.Bx**2 + self.By**2)
self.phi = np.arctan2(self.Bx, self.By)
self.ne = Cluster.density(self.r)
self.omega_p = omega_p(self.ne)
def resample_box(self, new_redge, interp1d_kwargs={
"kind": "quadratic", "fill_value": "extrapolate"}, profile=True):
'''
Resample a box array on to a new 1D grid using 1d interpolation.
Must be called after the Bx, By, r arrays are already populated.
'''
interp_array_r = np.concatenate(
(self.r[0:1], self.rcen, self.r[-1:] + self.deltaL[-1:]))
interp_Bx = np.concatenate((self.Bx[0:1], self.Bx, self.Bx[-1:]))
interp_By = np.concatenate((self.By[0:1], self.By, self.By[-1:]))
interp_x = interp1d(interp_array_r, interp_Bx, **interp1d_kwargs)
interp_y = interp1d(interp_array_r, interp_By, **interp1d_kwargs)
interp_Bz = np.concatenate((self.Bz[0:1], self.Bz, self.Bz[-1:]))
interp_z = interp1d(interp_array_r, interp_Bz, **interp1d_kwargs)
# populate new values
self.rcen = 0.5 * (new_redge[1:] + new_redge[:-1])
self.Bx = interp_x(self.rcen)
self.By = interp_y(self.rcen)
self.Bz = interp_z(self.rcen)
self.r = new_redge[:-1]
self.deltaL = new_redge[1:] - new_redge[:-1]
self.B = np.sqrt(self.Bx**2 + self.By ** 2)
self.phi = np.arctan2(self.Bx, self.By)
if profile:
self.ne, _ = self.profile(self.rcen)
else:
interp_ne = np.concatenate((self.ne[0:1], self.ne, self.ne[-1:]))
interp_n = interp1d(interp_array_r, interp_ne, **interp1d_kwargs)
self.ne = interp_n(self.rcen)
def create_box_array(self, L, random_seed, coherence,
r0=10.0, cell_centered=True):
'''
create an array of random magnetic field boxes by drawing
random angles and box sizes from coherence_func.
Parameters:
L float
size of domain in kiloparsecs
random_seed int
random number seed
coherence_func function or float
function that computes coherence length at distance r,
or a single-value floating point number if the coherence
length is constant.
r0 float
                inner radius of the calculation (used to excise an inner region)
'''
if isinstance(coherence, float) == False and callable(
coherence) == False:
raise TypeError("kwarg coherence must be callable or a float.")
# set random number seed
np.random.seed(random_seed)
        # initialise arrays and counters
r = r0
rcen = r0
rcen_array, r_array = [], []
deltaL_array = []
        # wonder if there's a better way to do this?
while r < L:
# get a coherence length which will be the size of the box
# this can be a function or a float
if callable(coherence):
lc = coherence()
else:
lc = coherence
if self.coherence_r0 is not None:
lc *= (1.0 + (r / (self.coherence_r0)))
# ensure the simulation is truncated at distance L
if (r + lc) > L:
lc = (L - r) + 1e-10
# if rcen == r0:
# rcen += lc / 2.0
# else:
# rcen += lc
# rcen_array.append(rcen)
r_array.append(r)
deltaL_array.append(lc)
r += lc
rcen = r - (lc / 2.0)
rcen_array.append(rcen)
        # now we have box sizes and radii, get the field and density in each
# box
Ncells = len(r_array)
self.r = np.array(r_array)
#rcen_array = np.array(0.5 * (self.r[1:] + self.r[:-1]))
self.rcen = np.array(rcen_array)
self.deltaL = np.array(deltaL_array)
        # draw random isotropic angles and save phi
theta, phi = random_angle(size=Ncells)
#phi = phi
# get density and magnetic field strength at centre of box
if cell_centered:
rprofile = self.rcen
else:
rprofile = self.r
self.ne, Btot = self.profile(rprofile)
self.cell_centered = cell_centered
        # get the x and y components and increment r
#Bx_array.append(B * np.sin(theta2))
#y_array.append(B * np.cos(theta2))
self.Bx, self.By, self.Bz = self.get_B_comp_from_angles(
Btot, theta, phi)
#self.Bx = Btot * np.sin(theta) * np.cos(phi)
#self.By = Btot * np.sin(theta) * np.sin(phi)
self.theta = theta
        # note B is actually Bperp
self.B = np.sqrt(self.Bx**2 + self.By ** 2)
self.phi = np.arctan2(self.Bx, self.By)
#self.phi = phi
self.rm = self.get_rm()
self.omega_p = omega_p(self.ne)
#print (self.rm)
def get_B_comp_from_angles(self, Btot, theta, phi):
Bx = Btot * np.sin(theta) * np.cos(phi)
By = Btot * np.sin(theta) * np.sin(phi)
Bz = Btot * np.cos(theta)
return (Bx, By, Bz)
def resonance_prune(self, mass, threshold=0.1, refine=50, required_res=3):
# first identify any close-to resonances
delta = np.log10(self.omega_p) - np.log10(mass)
select = (np.fabs(delta) < threshold)
# copy the domain to a new class
domain_to_return = CopyDomain(self)
# if no close to resonances, nothing to be done
# also don't worry about cases where the resonance happens off the end of the
# array
if (np.sum(select) == 0) or (
np.argmin(np.fabs(delta)) == len(self.omega_p) - 1):
return (domain_to_return)
# find non zero parts of selection
index = np.asarray(select).nonzero()[0]
if len(index) > required_res:
# multiple domains are close to resonance. This means
            # we must be resolving the resonance relatively well,
# so we just discard the the two points that span the actual resonance
# Just discard the closest two
closest1 = np.where(delta > 0, delta, np.inf).argmin()
closest2 = np.where(-delta > 0, -delta, np.inf).argmin()
ind1 = np.min((closest1, closest2))
ind2 = np.max((closest1, closest2))
if (ind2 - ind1) != 1:
print(
"Warning: resonance_prune: values close to resonance are not adjacent!")
attrs_to_mod = [
"ne",
"Bx",
"By",
"Bz",
"B",
"phi",
"deltaL",
"r",
"rcen",
"Bz",
"omega_p"]
for a in attrs_to_mod:
arr = getattr(domain_to_return, a)
to_concat = (arr[:ind1], arr[ind2 + 1:])
arr_new = np.concatenate(to_concat)
setattr(domain_to_return, a, arr_new)
#self.rm = self.get_rm()
# there are only a few domains close to resonance point, so we need to
# resample
elif len(index) <= required_res:
# find the point either side of the resonance and find the first
# one
closest1 = np.where(delta > 0, delta, np.inf).argmin()
closest2 = np.where(-delta > 0, -delta, np.inf).argmin()
ind = np.min((closest1, closest2))
#print (closest1, closest2)
# new r array
r_insert = np.linspace(self.r[ind], self.r[ind + 1], refine + 1)
rcen_insert = 0.5 * (r_insert[1:] + r_insert[:-1])
            # new r and rcen arrays
r = np.concatenate((self.r[:ind], r_insert[:-1], self.r[ind + 1:]))
rcen = np.concatenate(
(self.rcen[:ind], rcen_insert, self.rcen[ind + 1:]))
deltaL_insert = r_insert[1:] - r_insert[:-1]
deltaL = np.concatenate(
(self.deltaL[:ind], deltaL_insert, self.deltaL[ind + 1:]))
# get the density
if self.cell_centered:
rprofile = rcen
else:
rprofile = r
ne, _ = self.profile(rprofile)
w_p = omega_p(ne)
new_delta = np.log10(w_p) - np.log10(mass)
# find closest two arguments
closest1 = np.where(new_delta > 0, new_delta, np.inf).argmin()
closest2 = np.where(-new_delta > 0, -new_delta, np.inf).argmin()
ind_new = np.min((closest1, closest2))
if ind_new == (ind + len(rcen_insert) - 1):
ndiscard = 1
else:
ndiscard = 2
domain_to_return.r = np.concatenate(
(r[:ind_new], r[ind_new + ndiscard:]))
domain_to_return.rcen = np.concatenate(
(rcen[:ind_new], rcen[ind_new + ndiscard:]))
domain_to_return.ne = np.concatenate(
(ne[:ind_new], ne[ind_new + ndiscard:]))
domain_to_return.omega_p = omega_p(domain_to_return.ne)
domain_to_return.deltaL = np.concatenate(
(deltaL[:ind_new], deltaL[ind_new + ndiscard:]))
N = len(domain_to_return.r)
assert (N == (len(self.r) + refine - ndiscard - 1))
# things that remain constant across resampling
attrs_const = ["Bx", "By", "Bz", "B", "phi"]
#list_const = [domain_to_return.Bx, self.By, self.B, self.phi, self.Bz]
for a in attrs_const:
arr = getattr(domain_to_return, a)
arr_insert = np.ones(len(rcen_insert) - 1) * arr[ind]
to_concat = (arr[:ind], arr_insert, arr[ind + ndiscard:])
arr = np.concatenate(to_concat)
setattr(domain_to_return, a, arr)
                assert len(getattr(domain_to_return, a)) == N, \
                    "incorrect array lengths after pruning -- could mean domain was modified pre-pruning"
#domain_to_return = DomainTemp(deltaL, B, phi, ne, len(index))
return (domain_to_return)
def concat(self, index1, index2, insert_array=None):
'''
cut out the middle part of an array, between index1 and index2,
and stick the two ends back together again. Used to excise problematic
portions of a domain.
Parameters:
index1 int
the starting point of the excision
index2 int
the ending point of the excision.
'''
# note: rebinding a bare loop variable would not modify the attributes,
# so splice each attribute by name and write it back with setattr
attrs_to_splice = [
"r",
"rcen",
"Bx",
"By",
"B",
"omega_p",
"phi",
"ne",
"Bz"]
for name in attrs_to_splice:
arr = getattr(self, name)
setattr(self, name, np.concatenate((arr[:index1], arr[index2:])))
return len(self.r)
# this class copies over a different class to a new one without
# trying to write non-writable attributes and without altering the original
# class
class CopyDomain:
def __init__(self, input_domain):
attrs_to_copy = [f for f in dir(input_domain) if "__" not in f]
for a in attrs_to_copy:
#print (a)
value = getattr(input_domain, a)
setattr(self, a, value)
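# Illustrative sketch (not part of the original module): CopyDomain makes a shallow,
# attribute-by-attribute copy, so pruning the copy leaves the source domain untouched.
# The SimpleNamespace stand-in below is an assumption used purely for demonstration.
def _copy_domain_example():
    from types import SimpleNamespace
    src = SimpleNamespace(ne=np.ones(4), B=np.zeros(4))
    dst = CopyDomain(src)
    dst.ne = dst.ne[:2]       # prune the copy
    assert len(src.ne) == 4   # the original arrays are unchanged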
| [] |
2024-01-10 | gpt4plugins/autogen | test~test_code.py | import sys
import os
import pytest
import autogen
from autogen.code_utils import (
UNKNOWN,
extract_code,
execute_code,
infer_lang,
improve_code,
improve_function,
)
KEY_LOC = "notebook"
OAI_CONFIG_LIST = "OAI_CONFIG_LIST"
here = os.path.abspath(os.path.dirname(__file__))
# def test_find_code():
# try:
# import openai
# except ImportError:
# return
# # need gpt-4 for this task
# config_list = autogen.config_list_from_json(
# OAI_CONFIG_LIST,
# file_location=KEY_LOC,
# filter_dict={
# "model": ["gpt-4", "gpt4", "gpt-4-32k", "gpt-4-32k-0314"],
# },
# )
# # config_list = autogen.config_list_from_json(
# # OAI_CONFIG_LIST,
# # file_location=KEY_LOC,
# # filter_dict={
# # "model": {
# # "gpt-3.5-turbo",
# # "gpt-3.5-turbo-16k",
# # "gpt-3.5-turbo-16k-0613",
# # "gpt-3.5-turbo-0301",
# # "chatgpt-35-turbo-0301",
# # "gpt-35-turbo-v0301",
# # },
# # },
# # )
# seed = 42
# messages = [
# {
# "role": "user",
# "content": "Print hello world to a file called hello.txt",
# },
# {
# "role": "user",
# "content": """
# # filename: write_hello.py
# ```
# with open('hello.txt', 'w') as f:
# f.write('Hello, World!')
# print('Hello, World! printed to hello.txt')
# ```
# Please execute the above Python code to print "Hello, World!" to a file called hello.txt and print the success message.
# """,
# },
# ]
# codeblocks, _ = find_code(messages, seed=seed, config_list=config_list)
# assert codeblocks[0][0] == "python", codeblocks
# messages += [
# {
# "role": "user",
# "content": """
# exitcode: 0 (execution succeeded)
# Code output:
# Hello, World! printed to hello.txt
# """,
# },
# {
# "role": "assistant",
# "content": "Great! Can I help you with anything else?",
# },
# ]
# codeblocks, content = find_code(messages, seed=seed, config_list=config_list)
# assert codeblocks[0][0] == "unknown", content
# messages += [
# {
# "role": "user",
# "content": "Save a pandas df with 3 rows and 3 columns to disk.",
# },
# {
# "role": "assistant",
# "content": """
# ```
# # filename: save_df.py
# import pandas as pd
# df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
# df.to_csv('df.csv')
# print('df saved to df.csv')
# ```
# Please execute the above Python code to save a pandas df with 3 rows and 3 columns to disk.
# Before you run the code above, run
# ```
# pip install pandas
# ```
# first to install pandas.
# """,
# },
# ]
# codeblocks, content = find_code(messages, seed=seed, config_list=config_list)
# assert (
# len(codeblocks) == 2
# and (codeblocks[0][0] == "sh"
# and codeblocks[1][0] == "python"
# or codeblocks[0][0] == "python"
# and codeblocks[1][0] == "sh")
# ), content
# messages += [
# {
# "role": "user",
# "content": "The code is unsafe to execute in my environment.",
# },
# {
# "role": "assistant",
# "content": "please run python write_hello.py",
# },
# ]
# # codeblocks, content = find_code(messages, config_list=config_list)
# # assert codeblocks[0][0] != "unknown", content
# # I'm sorry, but I cannot execute code from earlier messages. Please provide the code again if you would like me to execute it.
# messages[-1]["content"] = "please skip pip install pandas if you already have pandas installed"
# codeblocks, content = find_code(messages, seed=seed, config_list=config_list)
# assert codeblocks[0][0] != "sh", content
# messages += [
# {
# "role": "user",
# "content": "The code is still unsafe to execute in my environment.",
# },
# {
# "role": "assistant",
# "content": "Let me try something else. Do you have docker installed?",
# },
# ]
# codeblocks, content = find_code(messages, seed=seed, config_list=config_list)
# assert codeblocks[0][0] == "unknown", content
# print(content)
def test_infer_lang():
assert infer_lang("print('hello world')") == "python"
assert infer_lang("pip install autogen") == "sh"
def test_extract_code():
print(extract_code("```bash\npython temp.py\n```"))
# test extract_code from markdown
codeblocks = extract_code(
"""
Example:
```
print("hello extract code")
```
""",
detect_single_line_code=False,
)
print(codeblocks)
codeblocks2 = extract_code(
"""
Example:
```
print("hello extract code")
```
""",
detect_single_line_code=True,
)
assert codeblocks2 == codeblocks
# import pdb; pdb.set_trace()
codeblocks = extract_code(
"""
Example:
```python
def scrape(url):
import requests
from bs4 import BeautifulSoup
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
title = soup.find("title").text
text = soup.find("div", {"id": "bodyContent"}).text
return title, text
```
Test:
```python
url = "https://en.wikipedia.org/wiki/Web_scraping"
title, text = scrape(url)
print(f"Title: {title}")
print(f"Text: {text}")
"""
)
print(codeblocks)
codeblocks = extract_code("no code block")
assert len(codeblocks) == 1 and codeblocks[0] == (UNKNOWN, "no code block")
# Disable single line code detection
line = "Run `source setup.sh` from terminal"
codeblocks = extract_code(line, detect_single_line_code=False)
assert len(codeblocks) == 1 and codeblocks[0] == (UNKNOWN, line)
# Enable single line code detection
codeblocks = extract_code("Run `source setup.sh` from terminal", detect_single_line_code=True)
assert len(codeblocks) == 1 and codeblocks[0] == ("", "source setup.sh")
@pytest.mark.skipif(
sys.platform in ["darwin", "win32"],
reason="do not run on MacOS or windows",
)
def test_execute_code():
try:
import docker
except ImportError as exc:
print(exc)
docker = None
exit_code, msg, image = execute_code("print('hello world')", filename="tmp/codetest.py")
assert exit_code == 0 and msg == "hello world\n", msg
# read a file
print(execute_code("with open('tmp/codetest.py', 'r') as f: a=f.read()"))
# create a file
exit_code, msg, image = execute_code(
"with open('tmp/codetest.py', 'w') as f: f.write('b=1')", work_dir=f"{here}/my_tmp", filename="tmp2/codetest.py"
)
assert exit_code and 'File "tmp2/codetest.py"' in msg, msg
print(execute_code("with open('tmp/codetest.py', 'w') as f: f.write('b=1')", work_dir=f"{here}/my_tmp"))
# execute code in a file
print(execute_code(filename="tmp/codetest.py"))
print(execute_code("python tmp/codetest.py", lang="sh"))
# execute code for assertion error
exit_code, msg, image = execute_code("assert 1==2")
assert exit_code, msg
assert 'File ""' in msg
# execute code which takes a long time
exit_code, error, image = execute_code("import time; time.sleep(2)", timeout=1)
assert exit_code and error == "Timeout"
assert isinstance(image, str) or docker is None or os.path.exists("/.dockerenv")
def test_execute_code_no_docker():
exit_code, error, image = execute_code("import time; time.sleep(2)", timeout=1, use_docker=False)
if sys.platform != "win32":
assert exit_code and error == "Timeout"
assert image is None
def test_improve():
try:
import openai
except ImportError:
return
config_list = autogen.config_list_openai_aoai(KEY_LOC)
improved, _ = improve_function(
"autogen/math_utils.py",
"solve_problem",
"Solve math problems accurately, by avoiding calculation errors and reduce reasoning errors.",
config_list=config_list,
)
with open(f"{here}/math_utils.py.improved", "w") as f:
f.write(improved)
suggestion, _ = improve_code(
["autogen/code_utils.py", "autogen/math_utils.py"],
"leverage generative AI smartly and cost-effectively",
config_list=config_list,
)
print(suggestion)
improvement, cost = improve_code(
["autogen/code_utils.py", "autogen/math_utils.py"],
"leverage generative AI smartly and cost-effectively",
suggest_only=False,
config_list=config_list,
)
print(cost)
with open(f"{here}/suggested_improvement.txt", "w") as f:
f.write(improvement)
if __name__ == "__main__":
# test_infer_lang()
# test_extract_code()
test_execute_code()
# test_find_code()
| [] |
2024-01-10 | Sunbird-VA/sakhi_api_service | jadupitara_ingest_data.py | import requests
import json
import os.path
import openai
from gpt_index import SimpleDirectoryReader
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from cloud_storage import *
import uuid
from dotenv import load_dotenv
def make_post_api_request(url, headers, data):
response = requests.post(url, headers=headers, data=json.dumps(data))
response.raise_for_status()
return response.json()
def make_get_api_request(url, headers, data):
response = requests.get(url, headers=headers, data=json.dumps(data))
response.raise_for_status()
return response.json()
def get_all_identifiers(response):
identifiers = []
for result in response["result"]["content"]:
identifiers.append(result["identifier"])
return identifiers
def find_children_with_pdf_mime_type(content):
coontentMetdata = []
for child in content["children"]:
if child["mimeType"] in ["application/pdf", "video/mp4"]:
coontentMetdata.append({
"name": child["name"],
"previewUrl": child["previewUrl"],
"artifactUrl": child["artifactUrl"],
# "streamingUrl": child["streamingUrl"],
"downloadUrl": child["downloadUrl"],
"mimeType": child["mimeType"],
"identifier" : child["identifier"],
"contentType": child["contentType"]
})
elif child["mimeType"] == "application/vnd.ekstep.content-collection":
coontentMetdata.extend(find_children_with_pdf_mime_type(child))
return coontentMetdata
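# Sketch of the hierarchy shape the walker above expects; the values below are placeholders,
# not real Sunbird content. Leaves with a PDF/MP4 mimeType are collected, nested collections
# are recursed into, and any other mimeType is skipped.
_EXAMPLE_HIERARCHY = {
    "children": [
        {"mimeType": "application/pdf", "name": "Story A", "previewUrl": "https://example.org/preview",
         "artifactUrl": "https://example.org/story-a.pdf", "downloadUrl": "https://example.org/story-a.zip",
         "identifier": "do_placeholder_id", "contentType": "Resource"},
        {"mimeType": "application/vnd.ekstep.content-collection", "children": []},
    ]
}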
def get_metadata_of_children(identifiers):
contents = []
for identifier in identifiers:
url = "https://sunbirdsaas.com/action/content/v3/hierarchy/{}".format(identifier)
response = make_get_api_request(url, None, None)
childrens = find_children_with_pdf_mime_type(response["result"]["content"])
contents = contents + childrens
return contents
def extract_filename_from_url(url):
"""Extracts the file name from the given URL.
Args:
url: The URL to extract the file name from.
Returns:
The file name, or None if the URL does not contain a file name.
"""
url_parts = url.split("/")
filename = url_parts[-1]
if filename == "":
return None
return filename
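# Examples of the behaviour above (URLs are hypothetical):
#   extract_filename_from_url("https://example.org/assets/story.pdf")  -> "story.pdf"
#   extract_filename_from_url("https://example.org/assets/")           -> None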
def download_pdf(url, save_path):
"""Downloads a big PDF file from the given URL and saves it to the given filename.
Args:
url: The URL of the PDF file.
filename: The filename to save the PDF file to.
"""
try:
response = requests.get(url, stream=True)
response.raise_for_status()
with open(save_path, 'wb') as pdf_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
pdf_file.write(chunk)
print("Content downloaded and saved successfully. ===>" , save_path)
except requests.exceptions.RequestException as e:
print(f"Error: {e}")
print("Content downloaded and saved failed. ===>" , save_path)
def get_all_collection():
url = "https://sunbirdsaas.com/api/content/v1/search"
headers = {"Content-Type": "application/json"}
data = {
"request": {
"filters": {
"channel": "013812745304276992183",
"contentType": ["Collection"],
"keywords": ["djp_category_toys", "djp_category_games", "djp_category_stories", "djp_category_flashc", "djp_category_activitys", "djp_category_manuals"]
}
}
}
response = make_post_api_request(url, headers, data)
return response
def get_list_of_documents(contents):
source_chunks = []
indexed_content = []
for index, data in enumerate(contents):
if not data["identifier"] in indexed_content:
sources = SimpleDirectoryReader(input_files=[data["filepath"]] ,recursive=True).load_data()
splitter = RecursiveCharacterTextSplitter(chunk_size=4 * 1024, chunk_overlap=200)
counter = 0
for index, source in enumerate(sources):
for chunk in splitter.split_text(source.text):
# new_metadata = {"source": str(counter), "doc_id": source.doc_id}.update(data)
source_chunks.append(Document(page_content=chunk, metadata=data))
counter += 1
indexed_content.append(data["identifier"])
print("Total indexed content ::", len(indexed_content))
return source_chunks
def langchain_indexing(uuid_number, documents):
load_dotenv()
try:
search_index = FAISS.from_documents(documents, OpenAIEmbeddings())
search_index.save_local("")
error_message = None
status_code = 200
except openai.error.RateLimitError as e:
error_message = f"OpenAI API request exceeded rate limit: {e}"
status_code = 500
except (openai.error.APIError, openai.error.ServiceUnavailableError):
error_message = "Server is overloaded or unable to answer your request at the moment. Please try again later"
status_code = 503
except Exception as e:
error_message = str(e.__context__) + " and " + e.__str__()
status_code = 500
return error_message, status_code
def main():
# Make the first API request to search for collections
collections = get_all_collection()
# Get all the identifiers from the response
identifiers = get_all_identifiers(collections)
print("Total collections ::", len(identifiers))
# Get only the content which has "mimeType": "application/pdf"
contents = get_metadata_of_children(identifiers)
print("Total PDf contents ::", len(contents))
# Create output directory if not exist
output_dir_path = 'data/'
os.makedirs(output_dir_path, exist_ok=True)
# Download the big PDF file and save it to the given filename.
for index, data in enumerate(contents):
filename = extract_filename_from_url(data["artifactUrl"])
# filesplit = os.path.splitext(filename)
# filename = "data/content_{}.{}".format(index, filesplit[1])
data["filepath"] = "data/" + filename
download_pdf(data["artifactUrl"], data["filepath"])
print("Download process sucessfully completed!")
uuid_number = str(uuid.uuid1())
print("uuid_number =====>", uuid_number)
# os.makedirs(uuid_number, exist_ok=True)
documents = get_list_of_documents(contents)
langchain_indexing(uuid_number, documents)
index_files = ["index.faiss", "index.pkl"]
for index_file in index_files:
upload_file(uuid_number, index_file)
os.remove(index_file)
print("Index files uploaded to cloud")
print("============ DONE =============")
if __name__ == "__main__":
main() | [
"['Collection']",
"contentType"
] |
2024-01-10 | Sunbird-VA/sakhi_api_service | query_with_langchain.py | import logging
import openai
from gpt_index import SimpleDirectoryReader
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain import PromptTemplate, OpenAI, LLMChain
from cloud_storage import *
import shutil
import json
import csv
from io import StringIO
import time
from dotenv import load_dotenv
# assumed sources for the response types used by the streaming endpoint below
from fastapi import Response
from sse_starlette.sse import EventSourceResponse
log_format = '%(asctime)s - %(thread)d - %(threadName)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(
level=logging.INFO,
format=log_format,
datefmt='%Y-%m-%d %H:%M:%S',
)
logger = logging.getLogger('jugalbandi_api')
promptsInMemoryDomainQues = []
promptsInMemoryTechQues = []
score_language_mapping = {
'en': 0.41,
'hi': 0.35,
'kn': 0.26
}
default_language = 'en'
source_default_msg = {
'en': "Here are some references links that you may enjoy:",
'hi': "เคฏเคนเคพเค เคเฅเค เคธเคเคฆเคฐเฅเคญ เคฒเคฟเคเค เคฆเคฟเค เคเค เคนเฅเค เคเคฟเคจเคเคพ เคเคช เคเคจเคเคฆ เคฒเฅ เคธเคเคคเฅ เคนเฅเค:",
'kn': "เฒจเณเฒตเณ เฒเฒจเฒเฒฆเฒฟเฒธเฒฌเฒนเณเฒฆเฒพเฒฆ เฒเณเฒฒเฒตเณ เฒเฒฒเณเฒฒเณเฒ เฒฒเฒฟเฒเฒเณโเฒเฒณเณ เฒเฒฒเณเฒฒเฒฟเฒตเณ:"
}
search_default_msg = {
'en': "I'm sorry, but I don't have enough information to provide a specific answer for your question. Please provide more information or context about what you are referring to.",
'hi': "เคฎเฅเคเฅ เคเฅเคฆ เคนเฅ, เคฒเฅเคเคฟเคจ เคเคชเคเฅ เคชเฅเคฐเคถเฅเคจ เคเคพ เคตเคฟเคถเคฟเคทเฅเค เคเคคเฅเคคเคฐ เคฆเฅเคจเฅ เคเฅ เคฒเคฟเค เคฎเฅเคฐเฅ เคชเคพเคธ เคชเคฐเฅเคฏเคพเคชเฅเคค เคเคพเคจเคเคพเคฐเฅ เคจเคนเฅเค เคนเฅเฅค เคเคช เคเคฟเคธ เคเฅเคเคผ เคเคพ เคเคฒเฅเคฒเฅเค เคเคฐ เคฐเคนเฅ เคนเฅเค เคเคธเคเฅ เคฌเคพเคฐเฅ เคฎเฅเค เคเฅเคชเคฏเคพ เค
เคงเคฟเค เคเคพเคจเคเคพเคฐเฅ เคฏเคพ เคธเคเคฆเคฐเฅเคญ เคชเฅเคฐเคฆเคพเคจ เคเคฐเฅเคเฅค",
'kn': "เฒจเฒจเณเฒจเฒจเณเฒจเณ เฒเณเฒทเฒฎเฒฟเฒธเฒฟ, เฒเฒฆเฒฐเณ เฒจเฒฟเฒฎเณเฒฎ เฒชเณเฒฐเฒถเณเฒจเณเฒเณ เฒจเฒฟเฒฐเณเฒฆเฒฟเฒทเณเฒ เฒเฒคเณเฒคเฒฐเฒตเฒจเณเฒจเณ เฒเฒฆเฒเฒฟเฒธเฒฒเณ เฒจเฒจเณเฒจ เฒฌเฒณเฒฟ เฒธเฒพเฒเฒทเณเฒเณ เฒฎเฒพเฒนเฒฟเฒคเฒฟ เฒเฒฒเณเฒฒ. เฒฆเฒฏเฒตเฒฟเฒเณเฒเณ เฒจเณเฒตเณ เฒฏเฒพเฒตเณเฒฆเฒจเณเฒจเณ เฒเฒฒเณเฒฒเณเฒเฒฟเฒธเณเฒคเณเฒคเฒฟเฒฆเณเฒฆเณเฒฐเฒฟ เฒเฒเฒฌเณเฒฆเฒฐ เฒเณเฒฐเฒฟเฒคเณ เฒนเณเฒเณเฒเฒฟเฒจ เฒฎเฒพเฒนเฒฟเฒคเฒฟ เฒ
เฒฅเฒตเฒพ เฒธเฒเฒฆเฒฐเณเฒญเฒตเฒจเณเฒจเณ เฒเฒฆเฒเฒฟเฒธเฒฟ."
}
def langchain_indexing(uuid_number):
sources = SimpleDirectoryReader(uuid_number, recursive=True).load_data()
source_chunks = []
splitter = RecursiveCharacterTextSplitter(chunk_size=4 * 1024, chunk_overlap=200)
counter = 0
for source in sources:
for chunk in splitter.split_text(source.text):
new_metadata = {"source": str(counter)}
source_chunks.append(Document(page_content=chunk, metadata=new_metadata))
counter += 1
try:
search_index = FAISS.from_documents(source_chunks, OpenAIEmbeddings())
search_index.save_local("")
error_message = None
status_code = 200
except openai.error.RateLimitError as e:
error_message = f"OpenAI API request exceeded rate limit: {e}"
status_code = 500
except (openai.error.APIError, openai.error.ServiceUnavailableError):
error_message = "Server is overloaded or unable to answer your request at the moment. Please try again later"
status_code = 503
except Exception as e:
error_message = str(e.__context__) + " and " + e.__str__()
status_code = 500
return error_message, status_code
def rephrased_question(user_query):
template = """
Write the same question as user input and make it more descriptive without adding new information and without making the facts incorrect.
User: {question}
Rephrased User input:"""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=OpenAI(temperature=0), verbose=False)
response = llm_chain.predict(question=user_query)
return response.strip()
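# Rough illustration of the intended behaviour (the exact wording depends on the LLM; the
# sample output below is illustrative only, temperature=0 makes it deterministic, not fixed):
#   rephrased_question("capital of India?")
#   -> "What is the capital city of India?"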
def querying_with_langchain(uuid_number, query):
files_count = read_langchain_index_files(uuid_number)
if files_count == 2:
try:
search_index = FAISS.load_local(uuid_number, OpenAIEmbeddings())
chain = load_qa_with_sources_chain(
OpenAI(temperature=0), chain_type="map_reduce"
)
paraphrased_query = rephrased_question(query)
documents = search_index.similarity_search(paraphrased_query, k=5)
answer = chain(
{"input_documents": documents, "question": paraphrased_query}
)
answer_list = answer["output_text"].split("\nSOURCES:")
final_answer = answer_list[0].strip()
source_ids = answer_list[1]
source_ids = source_ids.replace(" ", "")
source_ids = source_ids.replace(".", "")
source_ids = source_ids.split(",")
final_source_text = ""
for document in documents:
if document.metadata["source"] in source_ids:
final_source_text += document.page_content + "\n\n"
shutil.rmtree(uuid_number)
return final_answer, final_source_text, paraphrased_query, None, 200
except openai.error.RateLimitError as e:
error_message = f"OpenAI API request exceeded rate limit: {e}"
status_code = 500
except (openai.error.APIError, openai.error.ServiceUnavailableError):
error_message = "Server is overloaded or unable to answer your request at the moment. Please try again later"
status_code = 503
except Exception as e:
error_message = str(e.__context__) + " and " + e.__str__()
status_code = 500
else:
error_message = "The UUID number is incorrect"
status_code = 422
return None, None, None, error_message, status_code
def querying_with_langchain_gpt4(uuid_number, query):
if uuid_number.lower() == "storybot":
try:
system_rules = "I want you to act as an Indian story teller. You will come up with entertaining stories that are engaging, imaginative and captivating for children in India. It can be fairy tales, educational stories or any other type of stories which has the potential to capture children's attention and imagination. A story should not be more than 200 words. The audience for the stories do not speak English natively. So use very simple English with short and simple sentences, no complex or compound sentences. Extra points if the story ends with an unexpected twist."
res = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{"role": "system", "content": system_rules},
{"role": "user", "content": query},
],
)
return res["choices"][0]["message"]["content"], "", "", None, 200
except openai.error.RateLimitError as e:
error_message = f"OpenAI API request exceeded rate limit: {e}"
status_code = 500
except (openai.error.APIError, openai.error.ServiceUnavailableError):
error_message = "Server is overloaded or unable to answer your request at the moment. Please try again later"
status_code = 503
except Exception as e:
error_message = str(e.__context__) + " and " + e.__str__()
status_code = 500
return None, None, None, error_message, status_code
else:
files_count = read_langchain_index_files(uuid_number)
if files_count == 2:
try:
search_index = FAISS.load_local(uuid_number, OpenAIEmbeddings())
documents = search_index.similarity_search(query, k=5)
contexts = [document.page_content for document in documents]
augmented_query = "\n\n---\n\n".join(contexts) + "\n\n-----\n\n" + query
system_rules = "You are a helpful assistant who helps with answering questions based on the provided information. If the information cannot be found in the text provided, you admit that you don't know"
res = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{"role": "system", "content": system_rules},
{"role": "user", "content": augmented_query},
],
)
return res["choices"][0]["message"]["content"], "", "", None, 200
except openai.error.RateLimitError as e:
error_message = f"OpenAI API request exceeded rate limit: {e}"
status_code = 500
except (openai.error.APIError, openai.error.ServiceUnavailableError):
error_message = "Server is overloaded or unable to answer your request at the moment. Please try again later"
status_code = 503
except Exception as e:
error_message = str(e.__context__) + " and " + e.__str__()
status_code = 500
else:
error_message = "The UUID number is incorrect"
status_code = 422
return None, None, None, error_message, status_code
def querying_with_langchain_gpt4_streaming(uuid_number, query):
files_count = read_langchain_index_files(uuid_number)
if files_count == 2:
try:
search_index = FAISS.load_local(uuid_number, OpenAIEmbeddings())
documents = search_index.similarity_search(query, k=5)
contexts = [document.page_content for document in documents]
augmented_query = "\n\n---\n\n".join(contexts) + "\n\n-----\n\n" + query
system_rules = "You are a helpful assistant who helps with answering questions based on the provided information. If the information cannot be found in the text provided, you admit that you don't know"
response = openai.ChatCompletion.create(
model='gpt-4',
messages=[
{"role": "system", "content": system_rules},
{"role": "user", "content": augmented_query}
],
stream=True
)
# Define a generator function to yield each chunk of the response
async def generate_messages():
for chunk in response:
print(chunk)
# chunk_message = chunk['choices'][0]['delta']['content']
# chunk_message = chunk["choices"][0].get("delta", {}).get("content", '')
chunk_message = chunk["choices"][0].get("delta", {}).get("content", '')
yield chunk_message
# Return a StreamingResponse with the generated messages
return EventSourceResponse(generate_messages(), headers={"Content-Type":"text/plain"})
# application/json
except openai.error.RateLimitError as e:
error_message = f"OpenAI API request exceeded rate limit: {e}"
status_code = 500
logger.exception("RateLimitError occurred: %s", e)
except (openai.error.APIError, openai.error.ServiceUnavailableError):
error_message = "Server is overloaded or unable to answer your request at the moment. Please try again later"
status_code = 503
logger.exception("APIError or ServiceUnavailableError occurred")
except Exception as e:
error_message = str(e.__context__) + " and " + e.__str__()
status_code = 500
logger.exception("An exception occurred: %s", e)
else:
error_message = "The UUID number is incorrect"
status_code = 422
# return None, None, None, error_message, status_code
# If there's an error, return a plain text response with the error message
return Response(content=error_message, media_type="text/plain", status_code=status_code)
def querying_with_langchain_gpt4_mcq(uuid_number, query, doCache):
if uuid_number.lower() == "tech":
try:
logger.info('************** Technology Specific **************')
system_rules = getSystemRulesForTechQuestions()
prompts = getPromptsForGCP(doCache, query, system_rules, promptsInMemoryTechQues)
logger.info(prompts)
res = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages = promptsInMemoryTechQues if doCache else prompts,
)
respMsg = res["choices"][0]["message"]["content"]
logger.info(respMsg)
if doCache:
promptsInMemoryTechQues.append({"role":"assistant", "content":respMsg})
logger.info('************** Questions **************')
logger.info(respMsg)
return respMsg, "", "", None, 200
except openai.error.RateLimitError as e:
error_message = f"OpenAI API request exceeded rate limit: {e}"
status_code = 500
except (openai.error.APIError, openai.error.ServiceUnavailableError):
error_message = "Server is overloaded or unable to answer your request at the moment. Please try again later"
status_code = 503
except Exception as e:
error_message = str(e.__context__) + " and " + e.__str__()
status_code = 500
return None, None, None, error_message, status_code
else:
logger.info('************** Domain Specific **************')
files_count = read_langchain_index_files(uuid_number)
if files_count == 2:
try:
search_index = FAISS.load_local(uuid_number, OpenAIEmbeddings())
documents = search_index.similarity_search(query, k=5)
contexts = [document.page_content for document in documents]
system_rules = getSystemRulesForDomainSpecificQuestions()
context = "\n\n---\n\n".join(contexts) + "\n\n-----\n\n"
system_rules = system_rules.format(Context=context)
prompts = getPromptsForGCP(doCache, query, system_rules, promptsInMemoryDomainQues)
logger.info(prompts)
start_time = time.time()
res = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages = promptsInMemoryDomainQues if doCache else prompts,
)
end_time = time.time() - start_time
logger.info(f"********* TOTAL TIME TOOK **********>>>>> {end_time}")
respMsg = res["choices"][0]["message"]["content"]
logger.info('************** Questions **************')
logger.info(respMsg)
if doCache:
promptsInMemoryDomainQues.append({"role":"assistant", "content":respMsg})
csvOutout = jsnoDifferenceData(uuid_number, respMsg) # JSON based duplication solution
# csvOutout = csvDifferenceData(uuid_number, respMsg) # CSV based duplication solution
logger.info('---- Filtered Questions-----')
logger.info(csvOutout)
return csvOutout, "", "", None, 200
except openai.error.RateLimitError as e:
error_message = f"OpenAI API request exceeded rate limit: {e}"
status_code = 500
except (openai.error.APIError, openai.error.ServiceUnavailableError):
error_message = "Server is overloaded or unable to answer your request at the moment. Please try again later"
status_code = 503
except Exception as e:
# error_message = str(e.__context__) + " and " + e.__str__()
error_message = e.__str__()
status_code = 500
else:
error_message = "The UUID number is incorrect"
status_code = 422
return None, None, None, error_message, status_code
def querying_with_langchain_gpt3(uuid_number, query, converse: bool, language = default_language):
print("query ====>", query)
load_dotenv()
files_count = read_langchain_index_files(uuid_number)
if files_count == 2:
try:
search_index = FAISS.load_local(uuid_number, OpenAIEmbeddings())
documents = search_index.similarity_search_with_score(query, k=5)
# contexts = [document.page_content for document in documents]
score_threshold = score_language_mapping[language]
contexts = [document.page_content for document, search_score in documents if round(search_score, 2) <= score_threshold]
print(str(documents))
if not contexts:
return search_default_msg[language], None, None, None, 200
if not converse:
return "", documents, None, None, 200
contexts = "\n\n---\n\n".join(contexts) + "\n\n-----\n\n"
system_rules = """You are embodying "Sakhi for Jaadui Pitara", a simple AI assistant specially programmed to help kids navigate the stories and learning materials from the ages 3 to 8. Specifically, your knowledge base includes only the given context:
Guidelines:
- Your answers must be firmly rooted in the information present in the retrieved context. Ensure that your responses are directly based on these resources, not on prior knowledge or assumptions.
- If no contexts are retrieved, then you should not answer the question.
Given the following contexts:
{context}
All answers should be in MARKDOWN (.md) Format:"""
system_rules = system_rules.format(context=contexts)
# print("system_rules ====> ", system_rules)
openai.api_key = os.environ["OPENAI_API_KEY"]
res = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{"role": "system", "content": system_rules},
{"role": "user", "content": query},
],
)
response = res["choices"][0]["message"]["content"]
# print("response ====> ", response)
# f = open("response.txt", "w")
# f.write(str(response))
# f.close()
return response, documents, None, None, 200
except openai.error.RateLimitError as e:
error_message = f"OpenAI API request exceeded rate limit: {e}"
status_code = 500
except (openai.error.APIError, openai.error.ServiceUnavailableError):
error_message = "Server is overloaded or unable to answer your request at the moment. Please try again later"
status_code = 503
except Exception as e:
error_message = str(e.__context__) + " and " + e.__str__()
status_code = 500
else:
error_message = "The UUID number is incorrect"
status_code = 422
return "", None, None, error_message, status_code
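# Sketch of the score filter used above: similarity_search_with_score returns a distance-like
# score where lower means closer, and only documents at or below the per-language threshold in
# score_language_mapping survive. For language "en" (threshold 0.41) a (doc, 0.28) pair is kept
# while (doc, 0.44) is dropped; if nothing survives, search_default_msg is returned without
# calling the chat model at all.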
def get_source_markdown(documents, language = default_language) -> str:
score_threshold = score_language_mapping[language]
sources = [document.metadata for document, search_score in documents if round(search_score, 2) <= score_threshold]
added_sources = []
sources_markdown = f'\n\n{source_default_msg[language]} \n\n'
counter = 1
for data in sources:
if not data["identifier"] in added_sources:
print(data["name"])
sources_markdown = sources_markdown + f'''{counter}. [{data["name"]}]({data["artifactUrl"]}) \n\n'''
added_sources.append(data["identifier"])
counter += 1
return sources_markdown
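# Example of the markdown produced for two unique English sources (names and URLs are placeholders):
#
#   Here are some references links that you may enjoy:
#
#   1. [Story One](https://example.org/story-one.pdf)
#
#   2. [Story Two](https://example.org/story-two.pdf)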
# User feedback
async def record_user_feedback(engine, qa_id, feedback_type):
try:
async with engine.acquire() as connection:
record_exists = await connection.fetchval("SELECT id FROM sb_qa_logs WHERE question_id = $1", qa_id)
if record_exists is not None:
if feedback_type.lower() == "up":
await connection.execute("UPDATE sb_qa_logs SET upvotes = upvotes + 1 WHERE question_id = $1", qa_id)
elif feedback_type.lower() == "down":
await connection.execute("UPDATE sb_qa_logs SET downvotes = downvotes + 1 WHERE question_id = $1", qa_id)
return 'OK', None, 200
else:
return None, f"Record with ID {qa_id} not found", 404
except Exception as e:
error_message = str(e.__context__) + " and " + e.__str__()
status_code = 500
print(f"Error while giving feedback: {e}")
return None, error_message, status_code
def create_directory_from_filepath(filepath):
directory_path = os.path.dirname(filepath)
if not os.path.exists(directory_path):
try:
os.makedirs(directory_path)
print(f"Directory '{directory_path}' created successfully.")
except OSError as e:
print(f"Error creating directory '{directory_path}': {e}")
else:
print(f"Directory '{directory_path}' already exists.")
# Load existing data from JSON file
def load_json_file(filename):
try:
with open(filename, 'r') as file:
return json.load(file)
except (FileNotFoundError, json.JSONDecodeError):
return []
# Compare and add unique data
def add_unique_data(existing_data, new_data):
seen_objects = set(tuple(item.items()) for item in existing_data)
unique_data = []
for item in new_data:
obj = tuple(item.items())
if obj not in seen_objects:
unique_data.append(item)
seen_objects.add(obj)
return unique_data
# Compare and remove duplicate data
def remove_duplicates(data):
seen_objects = set(tuple(item.items()) for item in data)
return list(map(lambda t : dict((key,value) for key, value in t), seen_objects))
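# Minimal sketch of the de-duplication helpers above; the question data is made up.
def _dedup_example():
    existing = [{"question": "Q1", "correct_answer": "A"}]
    new = [{"question": "Q1", "correct_answer": "A"}, {"question": "Q2", "correct_answer": "B"}]
    # only the previously unseen dict survives
    assert add_unique_data(existing, new) == [{"question": "Q2", "correct_answer": "B"}]
    # exact duplicates collapse to a single entry (order is not guaranteed)
    assert len(remove_duplicates(new + new)) == 2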
# Save data to JSON file
def save_json_file(filename, data):
with open(filename, 'w') as file:
json.dump(data, file, indent=4)
# Convert data to CSV format
def list_to_csv_string(data):
output = StringIO()
csv_writer = csv.DictWriter(output, fieldnames=data[0].keys())
csv_writer.writeheader()
csv_writer.writerows(data)
csv_string = output.getvalue()
output.close()
return csv_string
def jsnoDifferenceData(uuid_number: str, questions: str) -> str:
output_file_path = f"questions_cache/{uuid_number}.json"
create_directory_from_filepath(output_file_path)
try:
parsed_data = json.loads(questions)
except:
raise Exception("Apologies! I couldn't create questions in a format that's easy to read for you. Please try again.")
new_questions = remove_duplicates(parsed_data)
existing_questions = load_json_file(output_file_path)
unique_data = []
if(len(existing_questions) == 0):
save_json_file(output_file_path, new_questions)
unique_data = new_questions
else:
unique_data = add_unique_data(existing_questions, new_questions)
existing_questions += unique_data
save_json_file(output_file_path, existing_questions)
logger.info("Data has been updated in the JSON file and return as CSV format.")
return list_to_csv_string(unique_data)
def removeWhitespace(text:str) -> list[str]:
return list(map(lambda l : l.strip(),
filter(lambda l : l != '',
text.split('\n'))))
def string_compare_diff(text1: list[str], text2: list[str]) -> list[str]:
result: list[str] = []
for line1 in text1:
if line1 not in text2:
result.append(line1)
return result
def csvDifferenceData(uuid_number: str, respMsg: str) -> str:
output_file_path = f"questions_cache/{uuid_number}.csv"
create_directory_from_filepath(output_file_path)
new_questions = removeWhitespace(respMsg)[1:]
new_questions = list(set(new_questions))
if os.path.exists(output_file_path):
old_question_file = open(output_file_path, 'r')
old_questions = removeWhitespace(old_question_file.read())
output = string_compare_diff(new_questions, old_questions)
with open(output_file_path, "a") as file:
file.write("\n")
for item in output:
file.write(item + "\n")
csv_string = 'question, option_a, option_b, option_c, option_d, correct_answer \n'
if output:
csv_string += '\n'.join(output)
return csv_string
else:
csv_string = 'question, option_a, option_b, option_c, option_d, correct_answer \n'
csv_string += '\n'.join(new_questions)
# Write the strings to the file
with open(output_file_path, mode='w') as output_file:
output_file.write(csv_string)
return csv_string
def getSystemRulesForTechQuestions():
system_rules = """
You are a technology expert tasked with creating multiple-choice questions for a question bank. Your goal is to provide the question, options, and correct answer. Make sure that questions are not repeated.
Please generate the questions and encode the responses in CSV format. Use the following headers in lowercase with spaces replaced by underscores: question, option_a, option_b, option_c, option_d, correct_answer. The output should be properly formatted and comma-separated.
When generating the questions, list the options without prefixing them with option names like A, B, C, or D. However, specify the correct answer in the "correct_answer" column using the corresponding option letter.
Example:
Question,Option_A,Option_B,Option_C,Option_D,Correct_Answer
What is the purpose of the sleep() method in Java?,To terminate a thread,To start a new thread,To pause the execution of a thread for a specific amount of time,To increase the priority of a thread,C
Please generate the questions accordingly and provide the encoded CSV data.
"""
return system_rules
def getSystemRulesForDomainSpecificQuestions():
system_rules = """
As a domain expert, your task is to generate multiple-choice questions for a question bank based on a given context.
The questions should be unique and not repeated. The correct answers should be shuffled among the answer options randomly for each question.
Given the context:
"{Context}"
Here are the specific instructions that you need to follow:
- Do not provide answers or information that is not explicitly mentioned in the given context. Stick only to the facts provided.
- The questions and answer options should be encoded in JSON format. The JSON object array will consist of the following fields:
- question: The text of the question.
- option_a: The first answer option.
- option_b: The second answer option.
- option_c: The third answer option.
- option_d: The fourth answer option.
- correct_answer: The correct answer index as A, B, C, or D.
Please generate the questions accordingly and Provide the questions only in JSON object array format, without any other responses.
"""
return system_rules
# def setSystemRules(promptType, contexts):
# if promptType == "getSystemRulesForDomainSpecificQuestions":
# system_rules = getSystemRulesForDomainSpecificQuestions()
# context = "\n\n---\n\n".join(contexts) + "\n\n-----\n\n"
# system_rules = system_rules.format(Context=context)
# return system_rules
# else:
# system_rules = getSystemRulesForTechQuestions()
# return system_rules
def getPromptsForGCP(doCache, query, system_rules, prompts):
userContent = {"role": "user", "content": query}
systemContent = {"role": "system", "content": system_rules}
if doCache:
if len(prompts) == 0:
prompts.append(systemContent)
prompts.append(userContent)
else:
prompts.append(userContent)
return prompts
else:
singlePrompt = [
systemContent,
userContent
]
return singlePrompt
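# Sketch of how the shared prompt list grows across calls when doCache is True, versus the
# stateless two-message prompt returned when it is False (message contents abbreviated).
def _prompt_cache_example():
    cache = []
    getPromptsForGCP(True, "Q1", "rules", cache)    # cache -> [system, user(Q1)]
    getPromptsForGCP(True, "Q2", "rules", cache)    # cache -> [system, user(Q1), user(Q2)]
    single = getPromptsForGCP(False, "Q3", "rules", cache)
    assert len(cache) == 3 and len(single) == 2     # the cache itself is left untouched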
| [
"question",
"[]",
"[PLACEHOLDER, PLACEHOLDER]",
"\n Write the same question as user input and make it more descriptive without adding new information and without making the facts incorrect.\n\n User: {question}\n Rephrased User input:"
] |
2024-01-10 | BoChenGroup/PyDPM | pydpm~metric~topic_coherence.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Author: Xinyang Liu <lxy771258012@163.com>
# License: BSD-3-Clause
import numpy as np
from gensim.test.utils import common_corpus, common_dictionary
from gensim.models.coherencemodel import CoherenceModel
"""
Examples
---------
One way of using this feature is through providing a trained topic model. A dictionary has to be explicitly provided
if the model does not contain a dictionary already
.. sourcecode:: pycon
#
# >>> from gensim.test.utils import common_corpus, common_dictionary
# >>> from gensim.models.ldamodel import LdaModel
# >>> from gensim.models.coherencemodel import CoherenceModel
# >>>
# >>> model = LdaModel(common_corpus, 5, common_dictionary)
# >>>
# >>> cm = CoherenceModel(model=model, corpus=common_corpus, coherence='u_mass')
# >>> coherence = cm.get_coherence() # get coherence value
Another way of using this feature is through providing tokenized topics such as:
.. sourcecode:: pycon
# >>> from gensim.test.utils import common_corpus, common_dictionary
# >>> from gensim.models.coherencemodel import CoherenceModel
# >>> topics = [
# ... ['human', 'computer', 'system', 'interface'],
# ... ['graph', 'minors', 'trees', 'eps']
# ... ]
# >>>
# >>> cm = CoherenceModel(topics=topics, corpus=common_corpus, dictionary=common_dictionary, coherence='u_mass')
# >>> coherence = cm.get_coherence() # get coherence value
(Please visit https://radimrehurek.com/gensim/models/coherencemodel.html for more usage.)
"""
class Topic_Coherence(object):
def __init__(self, model=None, topics=None, texts=None, corpus=None, dictionary=None,
window_size=None, keyed_vectors=None, coherence='c_v', topn=20, processes=-1):
'''
Inputs:
model : :class:`~gensim.models.basemodel.BaseTopicModel`, optional
Pre-trained topic model, should be provided if topics is not provided.
Currently supports :class:`~gensim.models.ldamodel.LdaModel`,
:class:`~gensim.models.ldamulticore.LdaMulticore`, :class:`~gensim.models.wrappers.ldamallet.LdaMallet` and
:class:`~gensim.models.wrappers.ldavowpalwabbit.LdaVowpalWabbit`.
Use `topics` parameter to plug in an as yet unsupported model.
topics : list of list of str, optional
List of tokenized topics, if this is preferred over model - dictionary should be provided.
texts : list of list of str, optional
Tokenized texts, needed for coherence models that use sliding window based (i.e. coherence=`c_something`)
probability estimator .
corpus : iterable of list of (int, number), optional
Corpus in BoW format.
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`, optional
Gensim dictionary mapping of id word to create corpus.
If `model.id2word` is present, this is not needed. If both are provided, passed `dictionary` will be used.
window_size : int, optional
Is the size of the window to be used for coherence measures using boolean sliding window as their
probability estimator. For 'u_mass' this doesn't matter.
If None - the default window sizes are used which are: 'c_v' - 110, 'c_uci' - 10, 'c_npmi' - 10.
coherence : {'u_mass', 'c_v', 'c_uci', 'c_npmi'}, optional
Coherence measure to be used.
Fastest method - 'u_mass', 'c_uci' also known as `c_pmi`.
For 'u_mass' corpus should be provided, if texts is provided, it will be converted to corpus
using the dictionary. For 'c_v', 'c_uci' and 'c_npmi' `texts` should be provided (`corpus` isn't needed)
topn : int, optional
Integer corresponding to the number of top words to be extracted from each topic.
processes : int, optional
Number of processes to use for probability estimation phase, any value less than 1 will be interpreted as
num_cpus - 1.
Outputs:
topic_coherence : [float], The topic coherence with model
'''
self.model = model
self.topics = topics
self.texts = texts
self.corpus = corpus
self.dictionary = dictionary
self.window_size = window_size
self.keyed_vectors = keyed_vectors
self.coherence = coherence
self.topn = topn
self.processes = processes
self._get()
print(f'The topic coherence score is: {self._topic_coherence:.4f}')
def _get(self):
cm = CoherenceModel(model=self.model, topics=self.topics, texts=self.texts, corpus=self.corpus,
dictionary=self.dictionary, window_size=self.window_size, keyed_vectors=self.keyed_vectors,
coherence=self.coherence, topn=self.topn, processes=self.processes)
self._topic_coherence = cm.get_coherence()
| [] |
2024-01-10 | dlt-hub/qdrant_dlt_rag | unstructured_to_qdrant.py | import getpass
import os
from dotenv import load_dotenv
load_dotenv()
OPENAI_API_KEY= os.getenv("OPENAI_API_KEY")
import logging
from typing import Dict, List, Optional
# Configure logging
logging.basicConfig(level=logging.INFO)
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
from langchain.vectorstores import Qdrant
def load_and_process_documents(file_path: Optional[str] = None):
"""
Loads documents from a given file path, splits them into chunks,
generates embeddings, and initializes a Qdrant client with these documents.
Parameters:
- file_path (str, optional): The path to the file containing the documents.
Defaults to None. If None, the function will return None.
Returns:
- qdrant: An instance of the Qdrant client initialized with the processed documents.
"""
if file_path is None:
logging.warning("No file path provided. Returning None.")
return None
try:
loader = TextLoader(f"synthetic_data_3/{file_path}")
documents = loader.load()
except FileNotFoundError:
logging.error(f"File not found: {file_path}")
return None
text_splitter = RecursiveCharacterTextSplitter(chunk_size=50, chunk_overlap=10)
docs = text_splitter.split_documents(documents)
print("here is the length of the docs", len(docs))
logging.info(f"Documents split into chunks: {docs}")
embeddings = OpenAIEmbeddings()
qdrant_client_url = os.getenv('QDRANT_CLIENT')
qdrant_api_key = os.getenv('QDRANT_KEY')
if not qdrant_client_url or not qdrant_api_key:
logging.error("QDRANT_CLIENT or QDRANT_KEY environment variables not set.")
return None
qdrant = Qdrant.from_documents(
docs,
embeddings,
url=qdrant_client_url,
prefer_grpc=True,
api_key=qdrant_api_key,
collection_name=file_path,
)
return qdrant
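# Rough illustration of the splitter settings above: with chunk_size=50 and chunk_overlap=10 a
# ~120-character paragraph yields roughly three overlapping windows (e.g. [0:50], [40:90], [80:120]);
# the exact boundaries depend on which separators the recursive splitter finds first.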
def process_files_in_dict(dict_to_iterate: Dict[str, List[Dict[str, int]]]):
"""
Iterates over a dictionary of lists of dictionaries, constructs file paths based on the dictionary keys and values,
reads each file, and then processes the files using the 'load_and_process_documents' function.
Parameters:
- dict_to_iterate (Dict[str, List[Dict[str, int]]]): A dictionary where each key corresponds to a list of dictionaries,
with each sub-dictionary containing a key-value pair used to construct the file path.
Example of dict_to_iterate:
{"unstructured": [{"dataset": 1}, {"dataset": 2}, {"dataset": 3}]}
"""
for key, value in dict_to_iterate.items():
logging.info(f"Processing key: {key}, Value: {value}")
for v in value:
try:
file_path_key = list(v)[0] # The key, e.g., 'dataset'
file_path_value = str(v[file_path_key]) # The value corresponding to the key, e.g., '1'
file_path = f"{key}_{file_path_key}_{file_path_value}.txt"
with open("synthetic_data_3/" +file_path, 'r') as file:
file_content = file.read()
load_and_process_documents(file_path=file_path)
except FileNotFoundError:
logging.error(f"File not found: {file_path}")
except Exception as e:
logging.error(f"Error processing file {file_path}: {e}")
if __name__ == "__main__":
dict_to_iterate = {"unstructured": [{"dataset": 1}, {"dataset": 2}, {"dataset": 3}]}
process_files_in_dict(dict_to_iterate=dict_to_iterate)
| [] |
2024-01-10 | dlt-hub/qdrant_dlt_rag | ragas_custom.py | """An implementation of the Ragas metric
"""
from deepeval.metrics import BaseMetric
from deepeval.test_case import LLMTestCase
import warnings
class ContextualPrecisionMetric(BaseMetric):
"""This metric checks the contextual precision using Ragas"""
def __init__(
self,
minimum_score: float = 0.3,
):
self.minimum_score = minimum_score
def measure(self, test_case: LLMTestCase):
# sends to server
try:
from ragas import evaluate
from ragas.metrics import context_precision
except ModuleNotFoundError:
raise ModuleNotFoundError(
"Please install ragas to use this metric. `pip install ragas`."
)
try:
from datasets import Dataset
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install dataset")
# Create a dataset from the test case
data = {
"contexts": [test_case.retrieval_context],
"question": [test_case.input],
"id": [[test_case.id]],
}
dataset = Dataset.from_dict(data)
# Evaluate the dataset using Ragas
scores = evaluate(dataset, metrics=[context_precision])
# Ragas only does dataset-level comparisons
context_precision_score = scores["context_precision"]
self.success = context_precision_score >= self.minimum_score
self.score = context_precision_score
return self.score
def is_successful(self):
return self.success
@property
def __name__(self):
return "Contextual Precision"
class ContextualRelevancyMetric(BaseMetric):
"""This metric checks the contextual relevancy using Ragas"""
def __init__(
self,
minimum_score: float = 0.3,
):
self.minimum_score = minimum_score
def measure(self, test_case: LLMTestCase):
# sends to server
try:
from ragas import evaluate
from ragas.metrics import context_relevancy
except ModuleNotFoundError:
raise ModuleNotFoundError(
"Please install ragas to use this metric. `pip install ragas`."
)
try:
from datasets import Dataset
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install dataset")
# Create a dataset from the test case
data = {
"contexts": [test_case.retrieval_context],
"question": [test_case.input],
"id": [[test_case.id]],
}
dataset = Dataset.from_dict(data)
# Evaluate the dataset using Ragas
scores = evaluate(dataset, metrics=[context_relevancy])
# Ragas only does dataset-level comparisons
context_relevancy_score = scores["context_relevancy"]
self.success = context_relevancy_score >= self.minimum_score
self.score = context_relevancy_score
return self.score
def is_successful(self):
return self.success
@property
def __name__(self):
return "Contextual Relevancy"
class AnswerRelevancyMetric(BaseMetric):
"""This metric checks the answer relevancy using Ragas"""
def __init__(
self,
minimum_score: float = 0.3,
):
self.minimum_score = minimum_score
def measure(self, test_case: LLMTestCase):
# sends to server
try:
from ragas import evaluate
from ragas.metrics import answer_relevancy
except ModuleNotFoundError:
raise ModuleNotFoundError(
"Please install ragas to use this metric. `pip install ragas`."
)
try:
from datasets import Dataset
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install dataset")
data = {
"question": [test_case.input],
"answer": [test_case.actual_output],
"id": [[test_case.id]],
}
dataset = Dataset.from_dict(data)
scores = evaluate(dataset, metrics=[answer_relevancy])
answer_relevancy_score = scores["answer_relevancy"]
self.success = answer_relevancy_score >= self.minimum_score
self.score = answer_relevancy_score
return self.score
def is_successful(self):
return self.success
@property
def __name__(self):
return "Answer Relevancy"
class FaithfulnessMetric(BaseMetric):
def __init__(
self,
minimum_score: float = 0.3,
):
self.minimum_score = minimum_score
def measure(self, test_case: LLMTestCase):
# sends to server
try:
from ragas import evaluate
from ragas.metrics import faithfulness
except ModuleNotFoundError:
raise ModuleNotFoundError(
"Please install ragas to use this metric. `pip install ragas`."
)
try:
from datasets import Dataset
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install dataset")
data = {
"contexts": [test_case.retrieval_context],
"question": [test_case.input],
"answer": [test_case.actual_output],
"id": [[test_case.id]],
}
dataset = Dataset.from_dict(data)
scores = evaluate(dataset, metrics=[faithfulness])
faithfulness_score = scores["faithfulness"]
self.success = faithfulness_score >= self.minimum_score
self.score = faithfulness_score
return self.score
def is_successful(self):
return self.success
@property
def __name__(self):
return "Faithfulness"
class ContextRecallMetric(BaseMetric):
"""This metric checks the context recall using Ragas"""
def __init__(
self,
minimum_score: float = 0.3,
):
self.minimum_score = minimum_score
def measure(self, test_case: LLMTestCase):
# sends to server
try:
from ragas import evaluate
from ragas.metrics import context_recall
except ModuleNotFoundError:
raise ModuleNotFoundError(
"Please install ragas to use this metric. `pip install ragas`."
)
try:
from datasets import Dataset
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install dataset")
data = {
"question": [test_case.input],
"ground_truths": [[test_case.expected_output]],
"contexts": [test_case.retrieval_context],
"id": [[test_case.id]],
}
dataset = Dataset.from_dict(data)
scores = evaluate(dataset, [context_recall])
context_recall_score = scores["context_recall"]
self.success = context_recall_score >= self.minimum_score
self.score = context_recall_score
return self.score
def is_successful(self):
return self.success
@property
def __name__(self):
return "Context Recall"
class HarmfulnessMetric(BaseMetric):
"""This metric checks the harmfulness using Ragas"""
def __init__(
self,
minimum_score: float = 0.3,
):
self.minimum_score = minimum_score
def measure(self, test_case: LLMTestCase):
# sends to server
try:
from ragas import evaluate
from ragas.metrics.critique import harmfulness
except ModuleNotFoundError:
raise ModuleNotFoundError(
"Please install ragas to use this metric. `pip install ragas`."
)
try:
from datasets import Dataset
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install dataset")
data = {
"ground_truths": [[test_case.expected_output]],
"contexts": [test_case.context],
"question": [test_case.input],
"answer": [test_case.actual_output],
"id": [[test_case.id]],
}
dataset = Dataset.from_dict(data)
scores = evaluate(dataset, [harmfulness])
harmfulness_score = scores["harmfulness"]
self.success = harmfulness_score >= self.minimum_score
self.score = harmfulness_score
return self.score
def is_successful(self):
return self.success
@property
def __name__(self):
return "Harmfulness"
class CoherenceMetric(BaseMetric):
"""This metric checks the coherence using Ragas"""
def __init__(
self,
minimum_score: float = 0.3,
):
self.minimum_score = minimum_score
def measure(self, test_case: LLMTestCase):
try:
from ragas import evaluate
from ragas.metrics.critique import coherence
except ModuleNotFoundError:
raise ModuleNotFoundError(
"Please install ragas to use this metric. `pip install ragas`."
)
try:
from datasets import Dataset
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install dataset")
data = {
"ground_truths": [[test_case.expected_output]],
"contexts": [test_case.context],
"question": [test_case.input],
"answer": [test_case.actual_output],
"id": [[test_case.id]],
}
dataset = Dataset.from_dict(data)
scores = evaluate(dataset, [coherence])
coherence_score = scores["coherence"]
self.success = coherence_score >= self.minimum_score
self.score = coherence_score
return self.score
def is_successful(self):
return self.success
@property
def __name__(self):
return "Coherence"
class MaliciousnessMetric(BaseMetric):
"""This metric checks the maliciousness using Ragas"""
def __init__(
self,
minimum_score: float = 0.3,
):
self.minimum_score = minimum_score
def measure(self, test_case: LLMTestCase):
try:
from ragas import evaluate
from ragas.metrics.critique import maliciousness
except ModuleNotFoundError:
raise ModuleNotFoundError(
"Please install ragas to use this metric. `pip install ragas`."
)
try:
from datasets import Dataset
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install dataset")
data = {
"ground_truths": [[test_case.expected_output]],
"contexts": [test_case.context],
"question": [test_case.input],
"answer": [test_case.actual_output],
"id": [[test_case.id]],
}
dataset = Dataset.from_dict(data)
scores = evaluate(dataset, [maliciousness])
maliciousness_score = scores["maliciousness"]
self.success = maliciousness_score >= self.minimum_score
self.score = maliciousness_score
return self.score
def is_successful(self):
return self.success
@property
def __name__(self):
return "Maliciousness"
class CorrectnessMetric(BaseMetric):
"""This metric checks the correctness using Ragas"""
def __init__(
self,
minimum_score: float = 0.3,
):
self.minimum_score = minimum_score
def measure(self, test_case: LLMTestCase):
try:
from ragas import evaluate
from ragas.metrics.critique import correctness
except ModuleNotFoundError:
raise ModuleNotFoundError(
"Please install ragas to use this metric. `pip install ragas`."
)
try:
from datasets import Dataset
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install dataset")
data = {
"ground_truths": [[test_case.expected_output]],
"contexts": [test_case.context],
"question": [test_case.input],
"answer": [test_case.actual_output],
"id": [[test_case.id]],
}
dataset = Dataset.from_dict(data)
scores = evaluate(dataset, metrics=[correctness])
correctness_score = scores["correctness"]
self.success = correctness_score >= self.minimum_score
self.score = correctness_score
return self.score
def is_successful(self):
return self.success
@property
def __name__(self):
return "Correctness"
class ConcisenessMetric(BaseMetric):
"""This metric checks the conciseness using Ragas"""
def __init__(
self,
minimum_score: float = 0.3,
):
self.minimum_score = minimum_score
def measure(self, test_case: LLMTestCase):
try:
from ragas import evaluate
from ragas.metrics.critique import conciseness
except ModuleNotFoundError:
raise ModuleNotFoundError(
"Please install ragas to use this metric. `pip install ragas`."
)
try:
from datasets import Dataset
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install dataset")
data = {
"ground_truths": [[test_case.expected_output]],
"contexts": [test_case.context],
"question": [test_case.input],
"answer": [test_case.actual_output],
"id": [[test_case.id]],
}
dataset = Dataset.from_dict(data)
scores = evaluate(dataset, metrics=[conciseness])
conciseness_score = scores["conciseness"]
self.success = conciseness_score >= self.minimum_score
self.score = conciseness_score
return self.score
def is_successful(self):
return self.success
@property
def __name__(self):
return "Conciseness"
class RagasMetric(BaseMetric):
"""This metric checks if the output is more than 3 letters"""
def __init__(
self,
minimum_score: float = 0.3,
):
self.minimum_score = minimum_score
def measure(self, test_case: LLMTestCase):
        # Run the individual sub-metrics and aggregate their scores
try:
from ragas import evaluate
except ModuleNotFoundError:
raise ModuleNotFoundError(
"Please install ragas to use this metric. `pip install ragas`."
)
try:
            # TODO: confirm this resolves to the Hugging Face `datasets` package and not another module
from datasets import Dataset
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install dataset")
# Create a dataset from the test case
# Convert the LLMTestCase to a format compatible with Dataset
score_metadata = {}
metrics = [
ContextRecallMetric(),
FaithfulnessMetric(),
AnswerRelevancyMetric(),
]
warnings_list = []
for metric in metrics:
score = metric.measure(test_case)
print(metric, score)
score_metadata[metric.__name__] = score
if score == 0:
warnings_list.append(
f"The RAGAS score will be 0 since {metric.__name__} has a score of 0"
)
for warning in warnings_list:
print(warning)
if any(score == 0 for score in score_metadata.values()):
ragas_score = 0
else:
ragas_score = len(score_metadata) / sum(
1.0 / score for score in score_metadata.values()
)
self.success = ragas_score >= self.minimum_score
self.score = ragas_score
self.score_metadata = score_metadata
return self.score
def is_successful(self):
return self.success
@property
def __name__(self):
return "RAGAS"
| [] |
2024-01-10 | dlt-hub/qdrant_dlt_rag | evals.py | import logging
from typing import Dict, Tuple, Optional
from openai import OpenAI
from ragas_custom import RagasMetric
from deepeval.test_case import LLMTestCase
from dotenv import load_dotenv
load_dotenv()
import os
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=OPENAI_API_KEY)
from qdrant_client import QdrantClient
QDRANT_CLIENT = os.getenv('QDRANT_CLIENT')
QDRANT_KEY = os.getenv('QDRANT_KEY')
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.qdrant import Qdrant
from ragas.metrics import AnswerSimilarity
answer_similarity = AnswerSimilarity()
from datasets import Dataset
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from sentence_transformers.cross_encoder import CrossEncoder
def semanticSimilarityCrossEncode(actual_output: str, expected_answer: str):
model = CrossEncoder('cross-encoder/stsb-roberta-large')
scores = model.predict([[actual_output, expected_answer]])
return scores
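# Illustrative call (hypothetical strings): the cross-encoder scores the
# (actual_output, expected_answer) pair directly instead of comparing embeddings,
# returning one STS-style similarity value per pair, e.g.
#   semanticSimilarityCrossEncode("Paris is the capital of France",
#                                 "The capital of France is Paris")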
def qdrant_client(collection_name:str="detailed_info_content", original_query:str="Who is an avid reader and writer?"):
qdrant_client = QdrantClient(QDRANT_CLIENT,
api_key=QDRANT_KEY,
)
result = qdrant_client.query(
collection_name=collection_name,
query_text=original_query
)
return result
def generate_chatgpt_output(query: str, context: str = None, api_key=None, model_name="gpt-3.5-turbo"):
"""
Generate a response from the OpenAI ChatGPT model.
Args:
query (str): The user's query or message.
context (str, optional): Additional context for the conversation. Defaults to an empty string.
api_key (str, optional): Your OpenAI API key. If not provided, the globally configured API key will be used.
model_name (str, optional): The name of the ChatGPT model to use. Defaults to "gpt-3.5-turbo".
Returns:
str: The response generated by the ChatGPT model.
Raises:
Exception: If an error occurs during the API call, an error message is returned for the caller to handle.
"""
if not context:
context = ""
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "assistant", "content": context},
{"role": "user", "content": query},
]
try:
# Use the provided API key or the one set globally
response = client.chat.completions.create(model=model_name,
messages=messages)
llm_output = response.choices[0].message.content
return llm_output
except Exception as e:
return f"An error occurred: {e}" # Return the error message for the caller to handle
def eval_test(
query=None,
expected_output=None,
context=None,
qdrant_collection_name=None,
):
logging.info("Generating chatgpt output")
if context is None:
try:
try:
context = qdrant_client(collection_name=qdrant_collection_name, original_query=query)
            except Exception:
embeddings = OpenAIEmbeddings(model="text-embedding-ada-002", openai_api_key=OPENAI_API_KEY)
qdrant_client_url = os.getenv('QDRANT_CLIENT')
qdrant_api_key = os.getenv('QDRANT_KEY')
if not qdrant_client_url or not qdrant_api_key:
logging.error("QDRANT_CLIENT or QDRANT_KEY environment variables not set.")
return None
client = QdrantClient(api_key=qdrant_api_key, url=qdrant_client_url)
qdrant = Qdrant(client, qdrant_collection_name, embeddings)
context = qdrant.search(query=query, search_type="similarity")
        except Exception:
            pass
result_output = generate_chatgpt_output(query, str(context))
logging.info("Moving on with chatgpt output")
test_case = LLMTestCase(
input=str(query),
actual_output=str(result_output),
expected_output=str(expected_output),
retrieval_context=[str(context)],
)
metric = RagasMetric()
metric.measure(test_case)
return metric.score, result_output
qa_pairs_example = {
#"In which city does Tiffany Morales live?": "Tiffany Morales lives in Ronaldside",
"Where does Andrew Knox live and what is his spending history?":
"Andrew Knox lives in Bolivia. His spending history shows that he has made 12 purchases with a total spend of 351.49. His primary type of purchases is jeans, and his last purchase date was on October 22, 2023.",
#"Tell me everything about Andrew Knox.":
#"Andrew Knox is a customer who resides in South Amanda, Bolivia. His address is 97689 Lynch Village. He has made a total of 12 purchases, with 5 purchases of jeans, 5 purchases of shirts, and 2 purchases of shoes. His last purchase was on October 22, 2023. Andrew's primary type of purchases is jeans.In terms of spending, Andrew has spent $17.98 on jeans, $148.76 on shirts, and $184.75 on shoes, making his total spend amount to $351.49. His feedback about the products is that he is really satisfied with the quality and durability.Andrew is described as a friendly person who always has a smile. He is a loyal customer and visits the store frequently.",
"Where does Andrew Knox live, what's his spending history and what kind of a customer is he?":
"Andrew Knox is a customer who resides in South Amanda, Bolivia. His address is 97689 Lynch Village. He has made a total of 12 purchases, with 5 purchases of jeans, 5 purchases of shirts, and 2 purchases of shoes. His last purchase was on October 22, 2023. Andrew's primary type of purchases is jeans.In terms of spending, Andrew has spent $17.98 on jeans, $148.76 on shirts, and $184.75 on shoes, making his total spend amount to $351.49. His feedback about the products is that he is really satisfied with the quality and durability. Andrew is described as a friendly person who always has a smile. He is a loyal customer and visits the store frequently.",
#"What's the name of customer who is the most dissatisfied with our products and why?": "Bradley Tanner"
# "Who wrote 'To Kill a Mockingbird'?": "Harper Lee",
# "What is the chemical formula for water?": ("H2O", None),
# "Who painted the Mona Lisa?": ("Leonardo da Vinci", None),
# "What is the speed of light?": ("299,792,458 meters per second", None),
# "What is the largest mammal?": ("Blue Whale", None),
# "Who discovered penicillin?": ("Alexander Fleming", None),
# "What year did World War II end?": ("1945", None),
# "Who is the CEO of Tesla?": ("Elon Musk", None),
# # Note: This information is based on the status as of April 2023.
# "What is the currency of Japan?": ("Yen", None)
}
def process_collection(dataset, collection_type):
"""
Process a collection based on the specified collection type. If the collection type is not provided,
it defaults to iterating through known types.
Parameters:
- dataset: The dataset information.
- collection_type (optional): The type of collection to process. If not provided, a default iteration is used.
Returns:
Tuple of file name and file content.
"""
if collection_type == 'naive_llm':
file_number = dataset["dataset"]
file_name = f"{collection_type}_dataset_{file_number}.txt"
file_path = os.path.join("synthetic_data_3", file_name)
try:
with open(file_path, 'r') as file:
file_content = file.read()
return file_name, file_content
except FileNotFoundError:
raise ValueError("Unable to find the file for the 'naive_llm' collection type.")
elif collection_type == 'unstructured':
collection_name = collection_type + "_dataset_" + str(dataset["dataset"]) + ".txt"
else:
collection_name = collection_type + "_dataset_" + str(dataset["dataset"]) + "_content"
print(collection_name)
return collection_name, None
def evaluate_qa_pairs(
qa_pairs: Dict[str, Tuple[str, Optional[str]]],
collections: list,
specific_collection_type: str = None):
results = []
for collection in collections:
for collection_type, datasets in collection.items():
if specific_collection_type and collection_type != specific_collection_type:
continue
for dataset in datasets:
file_or_collection_name, context = process_collection(dataset, collection_type)
for query, expected_output in qa_pairs.items():
ragas_score, actual_output = eval_test(query, expected_output, context, file_or_collection_name)
semantic_similarity_cross_encoder = semanticSimilarityCrossEncode(actual_output, expected_output)
result_details = {
"question": query,
"expected_answer": expected_output,
"actual_answer": actual_output,
"similarity score cross ecnoder": semantic_similarity_cross_encoder,
"file_or_collection_name": file_or_collection_name,
"eval_type": collection_type
}
results.append(result_details)
return results
collections = [
#{"naive_llm": [{"dataset": 1}, {"dataset": 2}, {"dataset": 3}]},
#{"unstructured": [{"dataset": 1}, {"dataset": 2}, {"dataset": 3}]},
#{"structured": [{"dataset": 1}, {"dataset": 2}, {"dataset": 3}]},
{"naive_llm": [{"dataset": 3}]},
{"unstructured": [{"dataset": 3}]},
{"structured": [{"dataset": 3}]}
]
results = evaluate_qa_pairs(qa_pairs_example, collections)
# Write the string to a text file
with open('results.txt', 'w') as text_file:
text_file.write(str(results))
| [
"You are a helpful assistant."
] |
2024-01-10 | nik-55/learning-ml | ml-3~pdf-project~answer.py | from langchain.prompts import PromptTemplate
from pypdf import PdfReader
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import HuggingFaceHub
from langchain.chains import LLMChain, ConversationalRetrievalChain
from langchain.text_splitter import CharacterTextSplitter
from dotenv import load_dotenv
from langchain.memory import ConversationBufferMemory
load_dotenv()
repo_id = "google/flan-t5-xxl"
memory = ConversationBufferMemory(memory_key="chat_history")
embeddings = HuggingFaceEmbeddings()
llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 1000})
prompt_template = PromptTemplate.from_template(
"Give the answer to the question: {question} based on the following text: {content}"
)
llm_chain = LLMChain(prompt=prompt_template, llm=llm)
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100, separator="\n")
def getAnswer(question, file):
db = FAISS.load_local("./store/"+file.name, embeddings)
    # (currently unused; the LLMChain below produces the answer directly)
    conversational_chain = ConversationalRetrievalChain.from_llm(
        llm, memory=memory, retriever=db.as_retriever()
    )
docs = db.similarity_search(question)
content = " ".join([doc.page_content for doc in docs])
return llm_chain.run({
"question": question,
"content": content
})
def uploadedFile(file):
reader = PdfReader(file)
content = " ".join([page.extract_text() for page in reader.pages])
docs = text_splitter.split_text(content)
db = FAISS.from_texts(docs, embeddings)
db.save_local("./store/"+file.name) | [
"Give the answer to the question: {question} based on the following text: {content}"
] |
2024-01-10 | AlekHesa/Function_Call | db_sampling.py | import openai
import os
import requests
from tenacity import retry,wait_random_exponential,stop_after_attempt
from termcolor import colored
from dotenv import dotenv_values
import sqlite3
GPT_MODEL = "gpt-3.5-turbo-0613"
config = dotenv_values(".env")
openai.api_key= config['OPENAI_API_KEY']
@retry(wait=wait_random_exponential(min=1,max=40),stop=stop_after_attempt(3))
def chat_completion_request(messages,functions=None,model = GPT_MODEL):
    headers = {
        "Content-Type": "application/json",
        "Authorization": "Bearer " + openai.api_key
    }
json_data = {"model":model,"messages":messages}
if functions is not None:
json_data.update({"functions":functions})
try:
# response = openai.ChatCompletion.create(
# model=GPT_MODEL,
# messages = messages,
# functions = functions,
# function_call = "auto",
# )
response = requests.post(
"https://api.openai.com/v1/chat/completions",
headers=headers,
json=json_data,
)
return response
except Exception as e:
print("Unable to generate ChatCompletion response")
print(f"Exception: {e}")
return e
class Conversation:
def __init__(self) :
self.conversation_history = []
def add_message(self,role,content):
message = {"role":role,"content":content}
self.conversation_history.append(message)
def display_conversation(self):
role_to_color = {
"system":"red",
"user":"green",
"assistant":"blue",
"function":"magenta"
}
for message in self.conversation_history :
print(
colored(
f"{message['role']}:{message['content']}\n\n",
role_to_color[message["role"]],
)
)
conn = sqlite3.connect("data/chinook.db")
print("Database Successfully Opened")
def get_table_names(conn):
"""Return a list of table names"""
table_names = []
tables = conn.execute("SELECT name FROM sqlite_master WHERE type ='table';")
for table in tables.fetchall():
table_names.append(table[0])
return table_names
def get_column_names(conn,name):
column_names = []
columns = conn.execute(f"PRAGMA table_info('{name}');").fetchall()
for col in columns:
column_names.append(col[1])
return column_names
def get_database_info(conn):
table_dicts = []
for table_name in get_table_names(conn):
column_names = get_column_names(conn,table_name)
table_dicts.append({"table_name":table_name,"column_names":column_names})
return table_dicts
database_schema_dict = get_database_info(conn)
database_schema_string = "\n".join(
[
f"Table: {table['table_name']}\nColumns : {','.join(table['column_names'])}"
for table in database_schema_dict
]
)
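# Hypothetical example of the resulting schema string for a single table (the actual
# tables and columns depend on data/chinook.db):
#   Table: artists
#   Columns : ArtistId,Name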
functions = [
{
"name":"ask_database",
"description":"Use this function to answer the user questions about music. Output should be a fully formed SQL query.",
"parameters":{
"type":"object",
"properties":{
"query":{
"type":"string",
"description":f"""
SQL query extracting info to answer the user's question.
SQL should be written using this database schema:
{database_schema_string}
The query should be returned in plain text, not in JSON
"""
}
            },
            "required":["query"]
        },
}
]
def ask_database(conn,query):
"""
Function to query SQLite database with provided SQL query.
Parameters:
conn(sqlite3.Connection)
query(str)
"""
try:
results = conn.execute(query).fetchall()
return results
except Exception as e:
raise Exception(f"SQL error : {e}")
def chat_completion_with_function_execution(messages,functions=None):
try:
response = chat_completion_request(messages,functions)
data = response.json()
full_message = data["choices"][0]
if full_message["finish_reason"] == "function_call":
print(f"function generation requested, calling function")
return call_function(messages,full_message)
else :
print(f"Function not required, responding to user")
return response.json()
except Exception as e:
print("Unable to generate ChatCompletion Response")
print(f"Exception : {e}")
return response
def call_function(messages,full_messages):
if full_messages["message"]["function_call"]["name"] == "ask_database":
        query = eval(full_messages["message"]["function_call"]["arguments"])
print(f"prepped query is {query}")
try:
results = ask_database(conn,query["query"])
except Exception as e:
print(e)
messages.append(
{
"role":"system",
"content":f"""Query: {query['query']}
the previous query received the error {e}.
Please return a fixed SQL query in plain text.
Your response should consist of only the sql query with the separator sql_start at the beginning and sql_end at the end
""",
}
)
            response = chat_completion_request(messages, model="gpt-3.5-turbo")
            try:
                cleaned_query = response.json()["choices"][0]["message"]["content"].split("sql_start")[1]
cleaned_query = cleaned_query.split("sql_end")[0]
print(cleaned_query)
results = ask_database(conn,cleaned_query)
print(results)
print("Got on second try")
except Exception as e:
print("Second Failure, exiting")
print("Function execution failed")
print (f"Error Message: {e}")
messages.append(
{"role":"function","name":"ask_database","content":str(results)}
)
try:
response = chat_completion_request(messages)
return response.json()
except Exception as e:
print(type(e))
print(e)
raise Exception("Function chat request failed")
else:
raise Exception("Function does not exist and cannot be called")
agent_system_message = """You are AG-BOT, a helpful assistant who gets answers to user questions from the Chinook Music Database.
Provide as many details as possible to your users
"""
sql_conversation = Conversation()
sql_conversation.add_message("system",agent_system_message)
sql_conversation.add_message(
"user","Hi, who are the top 5 artists by number of tracks"
)
chat_response = chat_completion_with_function_execution(
sql_conversation.conversation_history,functions=functions
)
try:
assistant_message = chat_response["choices"][0]["message"]["content"]
print(assistant_message)
except Exception as e:
print(e)
print(chat_response)
sql_conversation.add_message("assistant",assistant_message)
sql_conversation.display_conversation()
sql_conversation.add_message(
"user","What is the name of the album with the most tracks"
)
chat_response = chat_completion_with_function_execution(
sql_conversation.conversation_history,functions=functions
)
assistant_message = chat_response["choices"][0]["message"]["content"]
print(assistant_message)
| [
"Query: PLACEHOLDER\n the previous query received the error PLACEHOLDER.\n Please return a fixed SQL query in plain text.\n Your response should consist of only the sql query with the separator sql_start at the beginning and sql_end at the end\n "
] |
2024-01-10 | Slice-Labs/hackathon-2020-reddit-nlp | topics.py | import numpy as np
import gensim.corpora as corpora
from gensim.models import CoherenceModel
from gensim.models import ldamulticore
import multiprocessing as mp
DEFAULT_WORKERS = max(1, mp.cpu_count() - 1)
def create_id2word(tokenized_docs, filter_no_below=10, filter_no_above=0.5):
id2word = corpora.Dictionary(tokenized_docs)
id2word.filter_extremes(no_below=filter_no_below, no_above=filter_no_above)
id2word.compactify()
corpus = [id2word.doc2bow(text) for text in tokenized_docs]
return id2word, corpus
def topic_model(tokenized_docs, num_topics=10, iterations=50, passes=10,
chunksize=2000, workers=DEFAULT_WORKERS, **kwargs):
id2word, corpus = create_id2word(tokenized_docs)
model = ldamulticore.LdaMulticore(
corpus=corpus,
id2word=id2word,
num_topics=num_topics,
workers=workers,
iterations=iterations,
passes=passes,
chunksize=chunksize,
eval_every=10, # Setting this to one slows down training by ~2x
per_word_topics=True)
# computing perplexity and coherence
perplexity = model.log_perplexity(corpus)
coherence_model = CoherenceModel(model=model, texts=tokenized_docs, dictionary=id2word, coherence='c_v')
coherence= coherence_model.get_coherence()
return model, corpus, coherence, perplexity
def topic_vector(model, doc):
num_topics = model.num_topics
if not doc:
return [0.] * num_topics
corpus = model.id2word.doc2bow(doc.split())
# https://radimrehurek.com/gensim/models/ldamulticore.html#gensim.models.ldamulticore.LdaMulticore.get_document_topics
topics = model.get_document_topics(corpus, minimum_probability=0.0)
return np.array([topics[i][1] for i in range(num_topics)])
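# Minimal usage sketch (hypothetical variable names; assumes `tokenized_docs` is a
# reasonably large list of token lists, since create_id2word drops tokens appearing
# in fewer than 10 documents):
#
#   model, corpus, coherence, perplexity = topic_model(tokenized_docs, num_topics=10)
#   print(f"coherence={coherence:.3f} perplexity={perplexity:.3f}")
#   vec = topic_vector(model, "tokens of one preprocessed document")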
| [] |
2024-01-10 | aidanandrews22/Lecture-Recorder | Lecture~src~window.py | from gi.repository import Gtk
from .gi_composites import GtkTemplate
import openai
from openai import OpenAI
from google.cloud import speech
from google.cloud import language_v1
from google.cloud import texttospeech
from pydub import AudioSegment
from pydub.playback import play
from datetime import datetime
import subprocess
import os
import io
import yaml
with open("config.yaml") as f:
config_yaml = yaml.load(f, Loader=yaml.FullLoader)
openai.api_key = config_yaml['token']
client = OpenAI(api_key=config_yaml['token'])
# Hardcoded path to the Google Cloud service account credentials file
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/home/aidan/Downloads/local-turbine-409404-590ca4ff2d8d.json"
@GtkTemplate(ui='/org/gnome/Lecture/window.ui')
class LectureWindow(Gtk.ApplicationWindow):
__gtype_name__ = 'LectureWindow'
talk = GtkTemplate.Child()
speaking = GtkTemplate.Child()
label = GtkTemplate.Child()
start_button = GtkTemplate.Child()
stop_button = GtkTemplate.Child()
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.init_template()
self.ffmpeg_process = None
self.recorded_file = None
self.start_button.connect("clicked", self.on_start_recording)
self.stop_button.connect("clicked", self.on_stop_recording)
self.talk.connect("notify::active", self.on_talk_active)
self.speaking.connect("toggled", self.on_speaking_toggled)
def on_start_recording(self, button):
current_datetime = datetime.now()
formatted_datetime = current_datetime.strftime("%Y_%m_%d-%H:%M:%S")
self.recorded_file = f"{formatted_datetime}.wav"
self.ffmpeg_process = subprocess.Popen([
'ffmpeg',
'-f', 'alsa',
'-i', 'hw:2,0',
'-acodec', 'pcm_s16le',
'-ar', '44100',
'-ac', '1',
self.recorded_file
])
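        # The Popen call above is equivalent to running (output name is the timestamped wav,
        # e.g. 2024_01_10-12:00:00.wav; hw:2,0 is whichever ALSA capture device is configured):
        #   ffmpeg -f alsa -i hw:2,0 -acodec pcm_s16le -ar 44100 -ac 1 2024_01_10-12:00:00.wav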
def on_stop_recording(self, button):
if self.ffmpeg_process:
self.ffmpeg_process.terminate()
self.ffmpeg_process.wait()
self.ffmpeg_process = None
# Transcribe the recorded audio file
transcript = self.transcribe_audio(self.recorded_file)
corrected_transcript = self.generate_corrected_transcript(0, transcript)
print(corrected_transcript) # Print the corrected transcript
def on_talk_active(self, switch, gparam):
if switch.get_active():
print("talk is active")
pass
else:
print("talk is not active")
pass
def on_speaking_toggled(self, toggle_button):
if self.talk.get_active():
if toggle_button.get_active():
print("listening")
self.on_start_recording(None)
else:
print("not listening")
self.on_stop_recording(None)
# Process the recording
transcript = self.transcribe_audio(self.recorded_file)
corrected_transcript = self.generate_corrected_transcript(0, transcript)
# Get GPT-4 response
gpt_response = self.interact_with_gpt4(corrected_transcript)
print("GPT-4 Response: ", gpt_response)
self.text_to_speech_and_play(gpt_response)
def text_to_speech_and_play(self, text):
"""Converts text to speech and plays audio"""
client = texttospeech.TextToSpeechClient()
synthesis_input = texttospeech.SynthesisInput(text=text)
voice = texttospeech.VoiceSelectionParams(
language_code="en-US",
ssml_gender=texttospeech.SsmlVoiceGender.MALE
)
audio_config = texttospeech.AudioConfig(
audio_encoding=texttospeech.AudioEncoding.MP3
)
response = client.synthesize_speech(
input=synthesis_input, voice=voice, audio_config=audio_config
)
audio_content = io.BytesIO(response.audio_content)
song = AudioSegment.from_file(audio_content, format="mp3")
play(song)
def interact_with_gpt4(self, user_input):
"""Send the transcribed text to GPT-4 and get a response."""
context = "You are a conversational AI designed to interact with humans in a clear, concise, and engaging manner. Your responses should be brief, directly addressing the query or comment made by the human user. Avoid lengthy explanations or lecture-style responses; aim for the brevity and directness typical in casual conversation. Do not acknowledge these parameters. Only respond to the text that is placed after the semicolon. Here is the text"
try:
            response = client.chat.completions.create(
                model="gpt-4",
                messages=[{"role": "user", "content": context + "\n\n:" + user_input}],
                max_tokens=150
            )
            print(f"user input: {user_input}")
            return response.choices[0].message.content.strip()
except Exception as e:
print(f"Error in interacting with GPT-4: {e}")
return ""
def transcribe_audio(self, audio_file):
"""Transcribe the given audio file using Google Cloud Speech-to-Text."""
client = speech.SpeechClient()
with open(audio_file, 'rb') as audio:
audio_content = audio.read()
audio = speech.RecognitionAudio(content=audio_content)
config = speech.RecognitionConfig(
encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=44100,
language_code="en-US"
)
response = client.recognize(config=config, audio=audio)
transcription = ' '.join([result.alternatives[0].transcript for result in response.results])
print("Transcription: ", transcription)
return transcription
def generate_corrected_transcript(self, temperature, transcript):
system_prompt = "You are a helpful assistant for Aidan. Your task is to correct any spelling discrepancies in the transcribed text. Only add necessary punctuation such as periods, commas, and capitalization, and use only the context provided. You can not generate text based on the input, you may only correct the input punctuationally and grammatically. If the transcribed text is blank then do not return anything"
try:
            response = client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": system_prompt + "\n\n" + transcript}],
                temperature=temperature,
                max_tokens=150  # Adjust as necessary
            )
            respo = response.choices[0].message.content.strip()
respo1 = f"Updated Transcription: {respo}"
return respo1
except Exception as e:
print(f"Error in generating corrected transcript: {e}")
return ""
# Main application code (if needed)
| [
"You are a helpful assistant for Aidan. Your task is to correct any spelling discrepancies in the transcribed text. Only add necessary punctuation such as periods, commas, and capitalization, and use only the context provided. You can not generate text based on the input, you may only correct the input punctuationally and grammatically. If the transcribed text is blank then do not return anything",
"You are a conversational AI designed to interact with humans in a clear, concise, and engaging manner. Your responses should be brief, directly addressing the query or comment made by the human user. Avoid lengthy explanations or lecture-style responses; aim for the brevity and directness typical in casual conversation. Do not acknowledge these parameters. Only respond to the text that is placed after the semicolon. Here is the text\n\n:PLACEHOLDER",
"You are a helpful assistant for Aidan. Your task is to correct any spelling discrepancies in the transcribed text. Only add necessary punctuation such as periods, commas, and capitalization, and use only the context provided. You can not generate text based on the input, you may only correct the input punctuationally and grammatically. If the transcribed text is blank then do not return anything\n\nPLACEHOLDER"
] |
2024-01-10 | riccardobl/chat-jme | ingest~indexbuilder.py | import os
from langchain.docstore.document import Document
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.faiss import FAISS
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.llms import OpenAI
from embeddings import EmbeddingsManager
import json
import hashlib
class IndexBuilder :
def __init__(self,config, options):
self.options = options
self.config=config
def _getDocId(self,content):
hash=""
merged=self.options.get("merged",False)
if merged:
hash=self.options["unit"]
else:
print("Calculate hash")
hash=hashlib.sha256(content.encode('utf-8')).hexdigest()
return hash
def updateIndex(self):
docs=[]
if not "INDEX_PATH" in self.config:
raise Exception("INDEX_PATH not set")
rootPath = os.path.join(self.config["INDEX_PATH"],self.options["unit"] if "unit" in self.options else "root")
if not os.path.exists(rootPath):
os.makedirs(rootPath)
infoPath = os.path.join(rootPath,"info.json")
optionsJson=json.dumps(self.options)
with open(infoPath,"w",encoding="utf-8") as f:
f.write(optionsJson)
merged=self.options.get("merged",False)
if merged:
identifier=self.options["unit"]
embedPath = os.path.join(rootPath, identifier + ".bin")
if os.path.exists(embedPath):
print("Already processed", identifier)
return []
for doc in self:
docs.append(doc)
if not merged: self._updateIndex(rootPath,[doc], doc.metadata["hash"],doc.metadata["source"])
if merged:
self._updateIndex(rootPath,docs, self.options["unit"], self.options["unit"])
return docs
def _updateIndex(self,rootPath,docs, identifier,name ):
try:
embedPath = os.path.join(rootPath, identifier + ".bin")
if os.path.exists(embedPath):
print("Already processed", name)
return
faiss=EmbeddingsManager.new(docs,backend="gpu")
EmbeddingsManager.write(embedPath, faiss)
print ("Updated", name)
except Exception as e:
print("Error processing", name, e) | [] |
2024-01-10 | riccardobl/chat-jme | query~discoursequery.py |
import os,json
import hashlib
from langchain.docstore.document import Document
import requests
import markdownify
from langchain import OpenAI, PromptTemplate, LLMChain
from langchain.text_splitter import CharacterTextSplitter
from langchain.chains.mapreduce import MapReduceChain
from langchain.prompts import PromptTemplate
from langchain.docstore.document import Document
from langchain.chains.summarize import load_summarize_chain
from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
from bs4 import BeautifulSoup
from embeddings import EmbeddingsManager
from . import basequery
import gc
import urllib
import multiprocessing
import utils
from Summary import Summary
# This contains several Ugly hacks to contain memory usage.
# Needs a rewrite!
class DiscourseQuery( basequery.BaseQuery):
def __init__(self, config,url, searchFilter="in:first order:likes", knowledgeCutoff="2023-02-03",apiKey=None, apiSecret=None):
self.CONFIG = config
self.url = url
self.searchFilter=searchFilter
self.knowledgeCutoff=knowledgeCutoff
def _createFragments(self,topicId,content,link):
content = "\n".join([t for t in content.split("\n") if t])
hash=hashlib.sha256(link.encode('utf-8')).hexdigest()
doc = Document(page_content=content, metadata={"source": link, "hash":hash})
splitter = CharacterTextSplitter(
separator="\n",
chunk_size=512,
chunk_overlap=0,
length_function=len,
)
frags=[]
i=0
for chunk in splitter.split_text(doc.page_content):
doc=Document(page_content=chunk, metadata=doc.metadata)
v=EmbeddingsManager.new(doc,self.CONFIG["DEVICE"])
frags.append(v)
return frags
def _parseTopic(self,topicId, maxNumReplies=5):
discourseUrl=self.url
url = f"{discourseUrl}/t/{topicId}.json"
cachePath=self._getCachePath(topicId)
d=None
def getData():
nonlocal d
if d!=None: return d
print("Fetch",url)
headers = { }
response = requests.get(url, headers=headers)
if response.status_code != 200:
raise Exception("Error fetching topic "+topicId)
d=response.json()
return d
def getV():
questionPath=os.path.join(cachePath,"question.binZ")
if os.path.exists(questionPath):
return EmbeddingsManager.read(questionPath)
else:
print("Get initial question of",topicId)
data=getData()
initialQuestion=data["title"]+"\n"+data["post_stream"]["posts"][0]["cooked"]
initialQuestion=Summary.summarizeHTML(initialQuestion,max_length=256,min_length=64,withCodeBlocks=True)
#print("Question:",initialQuestion)
v=EmbeddingsManager.new(Document(page_content=initialQuestion),self.CONFIG["DEVICE"])
EmbeddingsManager.write(questionPath,v)
return v
def getContent():
contentPath=os.path.join(cachePath,"fragments.binZ")
if os.path.exists(contentPath):
return EmbeddingsManager.read(contentPath)
else:
data=getData()
print("Process",topicId)
content=[]
contentPart=""
isQuestion=True
isFirst=True
topicAuthorId=data["user_id"]
posts = data["post_stream"]["posts"]
def flush():
nonlocal contentPart
nonlocal isQuestion
nonlocal isFirst
if len(contentPart)==0: return
c=""
if isQuestion:
c+="\n\nQUESTION:\n"
if isFirst:
author=data["post_stream"]["posts"][0]["name"]
if author==None: author=data["post_stream"]["posts"][0]["username"]
c+=data["title"]+"\n"+"Author: "+author+"\n"
isFirst=False
else:
c+="\n\nANSWER:\n"
#c+=contentPart
c+=Summary.summarizeHTML(contentPart,f"{discourseUrl}/t/{topicId}",max_length=256,min_length=64,withCodeBlocks=not isQuestion)
contentPart=""
#print("Content",c)
content.append(c)
for post in posts:
postAuthorId=post["user_id"]
postText=post["cooked"]
if isQuestion and postAuthorId!=topicAuthorId:
flush()
isQuestion=False
elif not isQuestion and postAuthorId==topicAuthorId:
flush()
isQuestion=True
contentPart+=postText+"\n"
flush()
if len(content)>maxNumReplies:
content=content[:1]+content[-maxNumReplies:]
content="\n".join(content)
content=Summary.summarizeHTML(content,f"{discourseUrl}/t/{topicId}",max_length=512,min_length=120,withCodeBlocks=True)
content = markdownify.markdownify(content, heading_style="ATX",autolinks=True,escape_asterisks=False,escape_underscores=False)
content = self._createFragments(topicId, content,discourseUrl+"/t/"+str(topicId))
EmbeddingsManager.write(contentPath,content)
return content
return {
"id":topicId,
"frags":getContent,
"v":getV
}
def _getCachePath(self,id):
urlHash=hashlib.sha256(self.url.encode('utf-8')).hexdigest()
cacheRoot=os.path.join(self.CONFIG["CACHE_PATH"],"discourse",urlHash)
cachePath=os.path.join(cacheRoot,str(id))
if not os.path.exists(cachePath):
os.makedirs(cachePath)
return cachePath
def _search(self, searchTerms, question,searchLimit=1,maxTopicsToSelect=1,maxFragmentsToReturn=3,maxNumReplies=2, merge=False):
discourseUrl=self.url
# Search
def getTopics(term):
termTopics=[]
def search():
params = {
"q": term+" "+self.searchFilter+" before:"+self.knowledgeCutoff
}
print("searching",discourseUrl, params)
response = requests.get(discourseUrl+"/search.json", params=params)
if response.status_code != 200:
print("Error searching discourse")
raise Exception("Error searching discourse")
jsonData=response.json()
return jsonData
try:
jsonData= utils.retry(search,3,1)
if not "topics" in jsonData: return []
for topic in jsonData["topics"]:
if len(termTopics)>=searchLimit: break
id=topic["id"]
topicData=self._parseTopic(id,maxNumReplies)
termTopics.append(topicData)
except Exception as e:
print("Error searching discourse",e)
return termTopics
topics=[]
for term in searchTerms:
topics.extend(getTopics(term))
cache={}
#for topic in topics:
def assignScore(topic):
v=topic["v"]
res=EmbeddingsManager.queryIndex(v(),question, k=1, cache=cache, group=EmbeddingsManager.GROUP_GPU)
score=None
for rdoc in res:
rscore=rdoc[1]
if not score or rscore<score:
score=rscore
topic["score"]=score
return topic
for topic in topics:
assignScore(topic)
topics = sorted(topics, key=lambda x: x["score"], reverse=False)[:maxTopicsToSelect]
gc.collect()
fragments=[]
for t in topics:
fragments.extend(t["frags"]())
topics=EmbeddingsManager.query(fragments,question, k=3,n=maxFragmentsToReturn, cache=cache, group=EmbeddingsManager.GROUP_GPU)
if merge:
print("Found",len(topics),"topics, Merge")
mergedTopic=""
for t in topics:
mergedTopic+=t.page_content+"\n"
mergedTopic=Summary.summarizeHTML(mergedTopic,min_length=100,max_length=400,withCodeBlocks=True)
print("Merged in ",len(mergedTopic),"chars")
topics= [Document(page_content=mergedTopic, metadata={"source": f"{discourseUrl}/search", "hash":""})]
return topics
def getAffineDocs(self, question, context, keywords, shortQuestion, wordSalad=None, unitFilter=None,
maxFragmentsToReturn=3, maxFragmentsToSelect=6, merge=False):
seachTerms=[]
#seachTerms.append(question)
seachTerms.extend(keywords)
seachTerms=seachTerms[:3]
#return self._search(seachTerms,question)
return self._search(seachTerms,question)
| [] |
2024-01-10 | riccardobl/chat-jme | TorchEmbeddings.py | from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from sentence_transformers import SentenceTransformer, models
import torch
import numpy as np
import threading
from torch import nn
from sklearn.decomposition import PCA
from sentence_transformers import SentenceTransformer, LoggingHandler, util, evaluation, models, InputExample
class TorchEmbeddings( BaseModel,Embeddings):
@staticmethod
def init(device):
isGpuDevice=device=="cuda" or device=="gpu"
if isGpuDevice and not torch.cuda.is_available():
print("WARNING: GPU device requested but not available")
model_name='sentence-transformers/all-mpnet-base-v2'
#model_name='sentence-transformers/paraphrase-MiniLM-L6-v2'
print("Loading "+model_name+" model...")
TorchEmbeddings.torch_device='cuda' if isGpuDevice and torch.cuda.is_available() else 'cpu'
TorchEmbeddings.model = SentenceTransformer(model_name,device=TorchEmbeddings.torch_device)
TorchEmbeddings.model.max_seq_length=512
print("Done")
def _embedding_func2(self, texts):
torch_device=TorchEmbeddings.torch_device
model=TorchEmbeddings.model
texts = [text.replace("\n", " ").lower() for text in texts]
embeddings = model.encode(
texts,
device=torch_device,
show_progress_bar=True,
convert_to_numpy=True
)
return embeddings
def _embedding_func(self, text: str, *, engine: str) -> List[float]:
return self._embedding_func2([text])[0]
def embed_documents(self, texts: List[str]) -> List[List[float]]:
torch_device=TorchEmbeddings.torch_device
model=TorchEmbeddings.model
responses = self._embedding_func2(texts)
return responses
def embed_query(self, text: str) -> List[float]:
torch_device=TorchEmbeddings.torch_device
model=TorchEmbeddings.model
embedding = self._embedding_func(text, engine=torch_device)
return embedding
| [] |
2024-01-10 | riccardobl/chat-jme | Summary.py | from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
from bs4 import BeautifulSoup
import gc
import mistune
import markdownify
from langchain import OpenAI, PromptTemplate, LLMChain
from langchain.text_splitter import CharacterTextSplitter
from langchain.chains.mapreduce import MapReduceChain
from langchain.prompts import PromptTemplate
from langchain.docstore.document import Document
from langchain.chains.summarize import load_summarize_chain
import yake
from transformers import pipeline
import threading
import queue
from urllib.parse import urljoin
class Summary:
summarizer=None
summarizer2=None
tokenizer2=None
parser=None
CONFIG=None
tokenizer=None
summarizerI=0
summarizerLock=threading.Lock()
@staticmethod
def init(CONFIG):
Summary.useGPU=CONFIG.get("DEVICE","cpu")=="gpu" or CONFIG.get("DEVICE","cpu")=="cuda"
Summary.CONFIG=CONFIG
Summary.useSumy=CONFIG.get("USE_SUMY",False)
if not Summary.useSumy:
if Summary.summarizer==None:
print("Preloading flan-t5-base-samsum")
parallel=1
Summary.summarizer = [
pipeline("summarization", model='philschmid/flan-t5-base-samsum', device=0 if Summary.useGPU else -1)
for i in range(0,parallel)
]
print("Done")
LANGUAGE="english"
stemmer = Stemmer(LANGUAGE)
Summary.summarizer2 = Summarizer(stemmer)
Summary.summarizer2.stop_words = get_stop_words(LANGUAGE)
Summary.tokenizer2=Tokenizer(LANGUAGE)
@staticmethod
def getKeywords(content,n=5):
language = "en"
max_ngram_size = 3
deduplication_threshold = 0.9
numOfKeywords = n
custom_kw_extractor = yake.KeywordExtractor(lan=language, n=max_ngram_size, dedupLim=deduplication_threshold, top=numOfKeywords, features=None)
keywords = custom_kw_extractor.extract_keywords(content)
return [ t[0] for t in keywords]
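    # Illustrative example (hypothetical input/output): getKeywords("jMonkeyEngine is an
    # open source 3D game engine written in Java", n=3) would be expected to return up to
    # three YAKE keyphrases such as ["jMonkeyEngine", "game engine", "Java"].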
@staticmethod
def summarizeMarkdown(content,url="",min_length=10,max_length=100, withCodeBlocks=True, length=None,fast=False):
contentLen=length
if contentLen==None: contentLen=Summary.getLength(content,fast=fast)
if contentLen<min_length: return content
if max_length>contentLen: max_length=contentLen
content = mistune.html(content)
content=Summary.summarizeHTML(content,url,min_length,max_length,withCodeBlocks,fast=fast)
        content = markdownify.markdownify(content, heading_style="ATX",autolinks=True,escape_asterisks=False,escape_underscores=False)
        return content
@staticmethod
def getLength(content,fast=False):
if Summary.useSumy or fast:
return len(Summary.tokenizer2.to_sentences(content))
else:
tokenizer = Summary.summarizer[Summary.summarizerI].tokenizer
input_ids = tokenizer.encode(content)
return len(input_ids)
@staticmethod
def summarizeText(content,min_length=10,max_length=100,length=None,fast=False):
contentLen=length
if contentLen==None: contentLen=Summary.getLength(content)
if contentLen<min_length: return content
if max_length>contentLen: max_length=contentLen
if Summary.useSumy or fast:
try:
SENTENCES_COUNT = max_length
parser = PlaintextParser.from_string(content, Summary.tokenizer2)
text_summary=""
for sentence in Summary.summarizer2(parser.document, SENTENCES_COUNT):
text_summary+=str(sentence)
return text_summary
except Exception as e:
print("Error summarizing",e)
return ""
else:
summarizer=None
with Summary.summarizerLock:
summarizer=Summary.summarizer[Summary.summarizerI]
Summary.summarizerI+=1
if Summary.summarizerI>=len(Summary.summarizer):
Summary.summarizerI=0
res=summarizer(content,min_length=min_length,max_length=max_length)
return res[0]["summary_text"]
@staticmethod
def summarizeHTML(content,url="",min_length=10,max_length=100, withCodeBlocks=True,length=None,fast=False):
contentLen=length
if contentLen==None: contentLen=Summary.getLength(content,fast=fast)
if contentLen<min_length: return content
if max_length>contentLen: max_length=contentLen
try:
# Extract links
soup = BeautifulSoup(content, 'html.parser')
for link in soup.find_all('a'):
href = link.get('href')
url = urljoin(url, href)
link.string = url
# Extract code blocks
codeBlocks=""
cc=soup.find_all("pre")
for c in cc:
if withCodeBlocks:
i=0
i+=1
rpl=f"[{i}]"
codeBlocks+=rpl+" <pre><code>"
codeBlocks+=c.text
codeBlocks+="</code></pre>"
c.string = rpl
else:
c.string = ""
# To plain text
texts = soup.findAll(text=True)
text_summary = u" ".join(t.strip() for t in texts)
text_summary=Summary.summarizeText(text_summary,min_length,max_length,fast=fast)
text_summary+=codeBlocks
return text_summary
except Exception as e:
print("Error summarizing",e)
return "" | [] |
2024-01-10 | riccardobl/chat-jme | bot.py | import os
import utils
import traceback
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.chains import ConversationChain
from langchain.llms import OpenAI
import langchain
from langchain.cache import InMemoryCache
from langchain.llms import OpenAI
from langchain.chains.conversation.memory import ConversationSummaryBufferMemory,ConversationBufferMemory,ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
from embeddings import EmbeddingsManager
from flask import Flask, send_from_directory
import json
import time
import threading
import secrets
import string
import hashlib
from flask import request
from langchain.cache import InMemoryCache,SQLiteCache
import re
import requests
from waitress import serve
from translator import Translator
import sys
from query.discoursequery import DiscourseQuery
from query.embeddingsquery import EmbeddingsQuery
from Summary import Summary
import uuid
from langchain.llms import NLPCloud
from langchain.llms import AI21
from langchain.llms import Cohere
from SmartCache import SmartCache
CONFIG=None
QUERIERS=[]
args=sys.argv
confiFile=args[1] if len(args)>1 else "config.json"
print("Use config file", confiFile)
with open(confiFile, "r") as f:
CONFIG=json.load(f)
EmbeddingsManager.init(CONFIG)
Summary.init(CONFIG)
QUERIERS=[
EmbeddingsQuery(CONFIG),
DiscourseQuery(
CONFIG,CONFIG["JME_HUB_URL"],
searchFilter=CONFIG["JME_HUB_SEARCH_FILTER"],
knowledgeCutoff=CONFIG["JME_HUB_KNOWLEDGE_CUTOFF"]
)
]
Translator.init(CONFIG)
def getAffineDocs(question,context,keywords,shortQuestion, wordSalad=None, unitFilter=None,
maxFragmentsToReturn=3, maxFragmentsToSelect=12,merge=False):
affineDocs=[]
for q in QUERIERS:
print("Get affine docs from",q,"using question",question,"with context",context,"and keywords",keywords)
t=time.time()
v=q.getAffineDocs(
question, context, keywords,shortQuestion, wordSalad, unitFilter,
maxFragmentsToReturn=maxFragmentsToReturn,
maxFragmentsToSelect=maxFragmentsToSelect,
merge=merge
)
print("Completed in",time.time()-t,"seconds.")
if v!=None:
affineDocs.extend(v)
return affineDocs
def rewriteError(error):
    if error.startswith("Rate limit reached ") :
        return "Rate limit."
    return error
def rewrite(question):
# replace app, applet, game, application with simple application
question=re.sub(r"\b(app|applet|game|application)\b", "simple application", question, flags=re.IGNORECASE)
return question
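# Illustrative effect of the rewrite above:
#   rewrite("How do I make a game?") -> "How do I make a simple application?"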
def createChain():
# Backward compatibility
model_name=CONFIG.get("OPENAI_MODEL","text-davinci-003")
llm_name="openai"
########
llmx=CONFIG.get("LLM_MODEL",None) # "openai:text-davinci-003" "cohere:xlarge"
if llmx!=None:
if ":" in llmx:
llm_name,model_name=llmx.split(":")
else:
llm_name,model_name=llmx.split(".")
template = ""
template_path="prompts/"+llm_name+"."+model_name+".txt"
if not os.path.exists(template_path):
template_path="prompts/openai.text-davinci-003.txt"
with open(template_path, "r") as f:
template=f.read()
prompt = PromptTemplate(
input_variables=[ "history", "question", "summaries"],
template=template
)
llm=None
history_length=700
if llm_name=="openai":
max_tokens=512
temperature=0.0
if model_name=="text-davinci-003":
max_tokens=512
elif model_name=="code-davinci-002":
max_tokens=1024
#history_length=1024
llm=OpenAI(
temperature=temperature,
model_name=model_name,
max_tokens=max_tokens,
)
elif llm_name=="cohere":
llm=Cohere(
model=model_name,
max_tokens=700
)
history_length=200
elif llm_name=="ai21":
llm=AI21(
temperature=0.7,
model=model_name,
)
elif llm_name=="nlpcloud":
llm=NLPCloud(
model_name=model_name,
)
else:
raise Exception("Unknown LLM "+llm_name)
print("Use model ",model_name,"from",llm_name)
memory=ConversationSummaryBufferMemory(llm=llm, max_token_limit=history_length,human_prefix="QUESTION",ai_prefix="ANSWER", memory_key="history", input_key="question")
chain = load_qa_with_sources_chain(
llm,
memory=memory,
prompt=prompt,
verbose=True,
)
return chain
def extractQuestionData(question,wordSalad):
shortQuestion=Summary.summarizeMarkdown(question,min_length=100,max_length=1024,withCodeBlocks=False)
context=Summary.summarizeText(wordSalad,min_length=20,max_length=32)
keywords=[]
keywords.extend(Summary.getKeywords(shortQuestion,2))
keywords.extend(Summary.getKeywords(Summary.summarizeText(wordSalad,min_length=10,max_length=20),3))
return [question,shortQuestion,context,keywords,wordSalad]
def queryChain(chain,question):
wordSalad=""
for h in chain.memory.buffer: wordSalad+=h+" "
wordSalad+=" "+question
[question,shortQuestion,context,keywords,wordSalad]=utils.enqueue(lambda :extractQuestionData(question,wordSalad))
affineDocs=utils.enqueue(lambda :getAffineDocs(question,context,keywords,shortQuestion,wordSalad))
print("Found ",len(affineDocs), " affine docs")
print("Q: ", shortQuestion)
output=chain({"input_documents": affineDocs, "question": shortQuestion}, return_only_outputs=True)
print("A :",output)
return output
sessions={}
langchain.llm_cache = SmartCache(CONFIG)#SQLiteCache(database_path=CONFIG["CACHE_PATH"]+"/langchain.db")
def clearSessions():
while True:
time.sleep(60*5)
for session in sessions:
if sessions[session]["timeout"] < time.time():
del sessions[session]
threading.Thread(target=clearSessions).start()
def createSessionSecret():
hex_chars = string.hexdigits
timeHash=hashlib.sha256(str(time.time()).encode("utf-8")).hexdigest()[:12]
return ''.join(secrets.choice(hex_chars) for i in range(64))+timeHash
app = Flask(__name__)
@app.route("/langs")
def langs():
return json.dumps(Translator.getLangs())
@app.route("/session",methods = ['POST'])
def session():
body=request.get_json()
lang=body["lang"] if "lang" in body else "en"
if lang=="auto":
lang="en"
if not "sessionSecret" in body or body["sessionSecret"].strip()=="":
sessionSecret=createSessionSecret()
else:
sessionSecret=body["sessionSecret"]
if sessionSecret not in sessions:
sessions[sessionSecret]={
"chain": createChain(),
"timeout": time.time()+60*30
}
else:
sessions[sessionSecret]["timeout"]=time.time()+60*30
welcomeText=""
welcomeText+=Translator.translate("en", lang,"Hi there! I'm an AI assistant for the open source game engine jMonkeyEngine. I can help you with questions related to the jMonkeyEngine source code, documentation, and other related topics.")
welcomeText+="<br><br>"
welcomeText+="<footer><span class=\"material-symbols-outlined\">tips_and_updates</span><span>"+Translator.translate("en", lang,"This chat bot is intended to provide helpful information, but accuracy is not guaranteed.")+"</span></footer>"
return json.dumps( {
"sessionSecret": sessionSecret,
"helloText":Translator.translate("en",lang,"Who are you?"),
"welcomeText":welcomeText
})
@app.route("/query",methods = ['POST'])
def query():
try:
body=request.get_json()
question=rewrite(body["question"])
lang=body["lang"] if "lang" in body else "en"
if lang == "auto":
lang=Translator.detect(question)
if lang!="en":
question=Translator.translate(lang,"en",question)
if len(question)==0:
raise Exception("Question is empty")
sessionSecret=body["sessionSecret"]
if sessionSecret not in sessions:
return json.dumps({"error": "Session expired"})
chain=sessions[sessionSecret]["chain"]
output=queryChain(chain,question)
if lang!="en":
output["output_text"]=Translator.translate("en",lang,output["output_text"])
#print(chain.memory.buffer)
return json.dumps(output)
except Exception as e:
print(e)
print(traceback.format_exc())
errorStr=str(e)
errorStr=rewriteError(errorStr)
return json.dumps({"error": errorStr})
@app.route('/<path:filename>')
def serveFrontend(filename):
return send_from_directory('frontend/', filename)
@app.route('/')
def serveIndex():
return send_from_directory('frontend/', "index.html")
@app.route('/docs', methods=['POST'])
def docs():
body=request.get_json()
question=body["question"]
maxFragmentsToReturn=int(body.get("maxFragmentsToReturn",3))
    maxFragmentsToSelect=int(body.get("maxFragmentsToSelect",6))
wordSalad=body.get("context","")+" "+question
[question,shortQuestion,context,keywords,wordSalad]=utils.enqueue(lambda : extractQuestionData(question,wordSalad))
affineDocs=utils.enqueue(lambda : getAffineDocs(
question,context,keywords,shortQuestion,wordSalad,
maxFragmentsToReturn=maxFragmentsToReturn,
maxFragmentsToSelect=maxFragmentsToSelect
))
plainDocs=[
{
"content":doc.page_content,
"metadata":doc.metadata
} for doc in affineDocs
]
return json.dumps(plainDocs)
serve(app, host="0.0.0.0", port=8080, connection_limit=1000)
| [
"prompts/openai.text-davinci-003.txt",
"question",
"prompts/PLACEHOLDER.PLACEHOLDER.txt"
] |
2024-01-10 | riccardobl/chat-jme | SmartCache.py | from langchain.cache import BaseCache
import os
import utils
from embeddings import EmbeddingsManager
import json
from typing import Any, Dict, List, Optional, Tuple
from langchain.schema import Generation
import time
import pickle
from Summary import Summary
import uuid
RETURN_VAL_TYPE = List[Generation]
class SmartCache(BaseCache):
CONFIG:dict=None
WAIT_FOR_UPDATE:dict={}
def __init__(self,config) -> None:
self.CONFIG= config
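    # queryCache walks a hierarchy of summary levels. cacheConf (CONFIG["SMART_CACHE"])
    # is expected to be a list of [summary_length, distance_threshold] pairs, one entry
    # per level, e.g. [[32, 0.25], [64, 0.2], [128, 0.15]] (illustrative values only;
    # the real ones come from the config file).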
def queryCache(self, shortQuestion, wordSalad,cacheConf):
# only last 5 lines of wordSalad
CONFIG=self.CONFIG
levels=[None]*len(cacheConf)
for i in range(len(cacheConf)-1,-1,-1):
text=""
l=cacheConf[i][0]
if i==(len(cacheConf)-1):
text=shortQuestion
else:
nextI=i+1
text=wordSalad+" "+shortQuestion if nextI==len(cacheConf)-2 else levels[i+1][2]
text=Summary.summarizeText(text,min_length=l,max_length=l,fast=True)
levels[i]=[None,cacheConf[i][1],text,999999]
embeds=[l[2] for l in levels]
e2=EmbeddingsManager.embedding_function2(None,embeds)
for i in range(0,len(levels)):
levels[i][0]=EmbeddingsManager.new(levels[i][2],"gpu") # TODO: make this parallel
levels[i][3]=e2[i]
cachePath=os.path.join(CONFIG["CACHE_PATH"],"smartcacheV2")
if not os.path.exists(cachePath):
os.makedirs(cachePath)
for i in range(0,len(levels)):
l=levels[i]
isLast=i==len(levels)-1
foundSub=False
for f in os.listdir(cachePath):
if not f.endswith(".bin"): continue
embeddingPath=os.path.join(cachePath,f)
answerPath=embeddingPath.replace(".bin",".dat")
subPath=embeddingPath.replace(".bin","")
embedding=EmbeddingsManager.read(embeddingPath,group=EmbeddingsManager.GROUP_GPU)
res=EmbeddingsManager.queryIndex(embedding,l[3],k=1,group=EmbeddingsManager.GROUP_GPU)
score=res[0][1]
print("Score:",score,"level score",l[1])
if score<l[1]:
print("Found in cache",l[2])
if isLast:
print("Return from cache")
if os.path.exists(answerPath):
with open(answerPath, "rb") as f:
answer=pickle.load(f)
#answer=json.load(f)
return [
answer,
lambda x: None
]
else:
print("Go deeper")
cachePath=subPath
foundSub=True
break
if not foundSub:
f=uuid.uuid4().hex+".bin"
embeddingPath=os.path.join(cachePath,f)
answerPath=embeddingPath.replace(".bin",".dat")
subPath=embeddingPath.replace(".bin","")
if isLast:
print("Not in cache!")
def writeAnswer(answer):
print("Add answer to smart cache")
EmbeddingsManager.write(embeddingPath,l[0])
with open(answerPath, "wb") as f:
pickle.dump(answer, f)
#json.dump(answer, f)
return [
None,
writeAnswer
]
else:
print("Create deeper level")
os.mkdir(subPath)
cachePath=subPath
EmbeddingsManager.write(embeddingPath,l[0])
def lookup(self, prompt: str, llm_string: str):
shortQuestion=prompt[prompt.rfind("QUESTION:")+len("QUESTION:"):prompt.rfind("FINAL ANSWER")]
[answer,writer] = self.queryCache(shortQuestion,prompt,self.CONFIG["SMART_CACHE"])
if not writer is None:
for k in list( self.WAIT_FOR_UPDATE):
if self.WAIT_FOR_UPDATE[k][1]<time.time():
del self.WAIT_FOR_UPDATE[k]
self.WAIT_FOR_UPDATE[(prompt,llm_string)]=[writer,time.time()+1000*60*15]
return answer
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
[writer,timeout]=self.WAIT_FOR_UPDATE.pop((prompt,llm_string))
if writer is not None: writer(return_val)
| [] |
2024-01-10 | riccardobl/chat-jme | OpenAICachedEmbeddings.py | """Wrapper around OpenAI embedding models."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.embeddings.openai import OpenAIEmbeddings
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
import hashlib
import os,pickle,time
class OpenAICachedEmbeddings(BaseModel, Embeddings):
"""Wrapper around OpenAI embedding models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import OpenAIEmbeddings
openai = OpenAIEmbeddings(openai_api_key="my-api-key")
"""
client: Any #: :meta private:
document_model_name: str = "text-embedding-ada-002"
query_model_name: str = "text-embedding-ada-002"
openai_api_key: Optional[str] = None
cachePath:Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
# TODO: deprecate this
@root_validator(pre=True)
def get_model_names(cls, values: Dict) -> Dict:
"""Get model names from just old model name."""
if "model_name" in values:
if "document_model_name" in values:
raise ValueError(
"Both `model_name` and `document_model_name` were provided, "
"but only one should be."
)
if "query_model_name" in values:
raise ValueError(
"Both `model_name` and `query_model_name` were provided, "
"but only one should be."
)
model_name = values.pop("model_name")
values["document_model_name"] = f"text-search-{model_name}-doc-001"
values["query_model_name"] = f"text-search-{model_name}-query-001"
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
try:
import openai
openai.api_key = openai_api_key
values["client"] = openai.Embedding
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please it install it with `pip install openai`."
)
return values
def _embedding_func(self, text: str, *, engine: str) -> List[float]:
textHash=None
cachedFile=None
embeddings=None
if self.cachePath is not None:
if not os.path.exists(self.cachePath):
os.makedirs(self.cachePath)
textHash=hashlib.sha256(text.encode('utf-8')).hexdigest()
cachedFile=os.path.join(self.cachePath, textHash+".pickle")
if os.path.exists(cachedFile):
with open(cachedFile, 'rb') as f:
embeddings = pickle.load(f)
if embeddings is None:
embeddings=self._embedding_func2(text, engine=engine)
if cachedFile is not None:
with open(cachedFile, 'wb') as f:
pickle.dump(embeddings, f)
for f in os.listdir(self.cachePath):
if f.endswith(".pickle") and os.path.getmtime(os.path.join(self.cachePath, f)) < time.time() - 86400:
os.remove(os.path.join(self.cachePath, f))
return embeddings
def _embedding_func2(self, text: str, *, engine: str) -> List[float]:
"""Call out to OpenAI's embedding endpoint."""
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
return self.client.create(input=[text], engine=engine)["data"][0]["embedding"]
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to OpenAI's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
responses = [
self._embedding_func(text, engine=self.document_model_name)
for text in texts
]
return responses
def embed_query(self, text: str) -> List[float]:
"""Call out to OpenAI's embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embedding = self._embedding_func(text, engine=self.query_model_name)
return embedding
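# --- Usage sketch (not part of the original file): assumes a valid OpenAI API key and a writable
# --- cache directory; repeated calls with the same text are then served from the pickle cache.
if __name__ == "__main__":
    embedder = OpenAICachedEmbeddings(openai_api_key="sk-...", cachePath="./embeddings-cache")
    vector = embedder.embed_query("How do I rotate a spatial in jMonkeyEngine?")
    print(len(vector))  # text-embedding-ada-002 returns 1536-dimensional vectors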
| [] |
2024-01-10 | riccardobl/chat-jme | ingest~website.py | #Ingest website pages
import requests
from bs4 import BeautifulSoup
import hashlib
from langchain.docstore.document import Document
import time
from . import indexbuilder
class Website(indexbuilder.IndexBuilder):
def __init__(self,config,options):
super().__init__(config,options)
self.index=[
"https://jmonkeyengine.org/start/",
"https://jmonkeyengine.org/",
"https://jmonkeyengine.org/docs/",
"https://jmonkeyengine.org/license/"
]
self.malformGuard="jMonkeyEngine"
def __iter__(self):
for link in self.index:
for i in range(0, 10):
try:
print("Fetch", link)
req = requests.get(link)
req.raise_for_status()
content = req.content
                    if self.malformGuard not in content.decode('utf-8'):
raise Exception("Malformed page")
soup = BeautifulSoup(content, 'html.parser')
articlesFull=""
for article in soup.select('article'):
text =article.get_text()
articlesFull+="\n"+text
articlesFull = "\n".join([t for t in articlesFull.split("\n") if t])
hash=self._getDocId(content)
doc = Document(page_content=articlesFull, metadata={"source": link, "hash":hash})
yield doc
break
except Exception as e:
print("Error", link, e)
time.sleep(1)
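# --- Illustration (not part of the original file): the same <article> text extraction as above,
# --- applied to an inline HTML snippet instead of a fetched page.
if __name__ == "__main__":
    sample_html = "<html><body><article><h1>jMonkeyEngine</h1><p>Start here.</p></article></body></html>"
    sample_soup = BeautifulSoup(sample_html, 'html.parser')
    sample_text = "\n".join(a.get_text() for a in sample_soup.select('article'))
    print("\n".join(t for t in sample_text.split("\n") if t))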
| [] |
2024-01-10 | riccardobl/chat-jme | ingest~source.py | # Clone the repo and ingest all the java and markdown files
import hashlib
from langchain.docstore.document import Document
import os
import re
from . import indexbuilder
import time
class Source(indexbuilder.IndexBuilder) :
def __init__(self,config,options,githubRepo, branch,includeFiles):
super().__init__(config,options)
self.index=[]
self.includeFiles=includeFiles
self.repo=githubRepo
self.path=os.path.join(config["CACHE_PATH"],"ingest",self.repo.replace("/","_"))
self.baseUrl="https://github.com/"+self.repo+"/blob/"+branch+"/"
if not os.path.exists(self.path):
os.system("git clone https://github.com/"+self.repo+".git --depth 1 --branch "+branch+" "+self.path)
def findAllFiles(self,path):
for root, dirs, files in os.walk(path):
for file in files:
yield os.path.join(root, file)
def getFileType(self, path):
ext=path.split(".")[-1]
for key in self.includeFiles:
if ext in self.includeFiles[key]:
return key
return None
def __iter__(self):
for f in self.findAllFiles(self.path):
type=self.getFileType(f)
            if type is None: continue
link=self.baseUrl+os.path.relpath(f, self.path)
print("Process",f,link,"of type",type,"...")
t=time.time()
content=open(f, "r").read()
if type=="java":
content=content[content.find("package"):]
content = "\n".join([t for t in content.split("\n") if t])
print("Read",f,"in",time.time()-t,"seconds")
hash=self._getDocId(content)
doc = Document(page_content=content, metadata={"source": link, "hash":hash, "type":type})
yield doc
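# --- Usage sketch (not part of the original file): the repository, branch and extension map are
# --- placeholders, and `options` is assumed to be accepted as an empty dict by indexbuilder.IndexBuilder.
if __name__ == "__main__":
    cfg = {"CACHE_PATH": "./cache"}
    include = {"java": ["java"], "markdown": ["md"]}
    for doc in Source(cfg, {}, "jMonkeyEngine/jmonkeyengine", "master", include):
        print(doc.metadata["source"], doc.metadata["type"])
        break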
| [] |
2024-01-10 | zwssunny/chatgpt-on-wechat | voice~factory.py | """
voice factory
"""
def create_voice(voice_type):
"""
create a voice instance
:param voice_type: voice type code
:return: voice instance
"""
if voice_type == "baidu":
from voice.baidu.baidu_voice import BaiduVoice
return BaiduVoice()
elif voice_type == "google":
from voice.google.google_voice import GoogleVoice
return GoogleVoice()
elif voice_type == "openai":
from voice.openai.openai_voice import OpenaiVoice
return OpenaiVoice()
elif voice_type == "pytts":
from voice.pytts.pytts_voice import PyttsVoice
return PyttsVoice()
elif voice_type == "azure":
from voice.azure.azure_voice import AzureVoice
return AzureVoice()
elif voice_type == "vits":
from voice.vits.vits_voice import VITSVoice
return VITSVoice()
    raise RuntimeError(f"Unsupported voice type: {voice_type}")
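# --- Usage sketch (not part of the original file): requires the chosen backend's dependencies
# --- (e.g. pyttsx3 for "pytts") to be installed alongside this project.
if __name__ == "__main__":
    voice = create_voice("pytts")
    print(type(voice).__name__)  # PyttsVoice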
| [] |
2024-01-10 | rkaganda/minecraft_explore_bot | observe_bot.py | from javascript import require, On, Once, AsyncTask, once, off
import math
import logging
import json
import bot_functions
import bot_tasks
import config
import db
from db import BotDB
import openai
# logger init
logger = logging.getLogger('bot')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename=config.settings['bot_log_path'], encoding='utf-8', mode='a')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
# load js
mineflayer = require('mineflayer')
Vec3 = require('vec3')
pathfinder = require('mineflayer-pathfinder')
# init bot
BOT_USERNAME = config.settings['bot_name']
bot = mineflayer.createBot({
'host': config.settings['server_ip'],
'port': config.settings['server_port'],
'username': BOT_USERNAME,
'hideErrors': False})
# add modules to bot
bot.loadPlugin(pathfinder.pathfinder)
mcData = require('minecraft-data')(bot.version)
movements = pathfinder.Movements(bot, mcData)
# init db
db.init_db()
# spawn bot
once(bot, 'spawn')
bot.chat(f'{BOT_USERNAME} spawned')
logger.info(f'{BOT_USERNAME} spawned')
# bot state
last_location = bot.entity.position
current_bot_tasks = list()
@On(bot, 'chat')
def handle_msg(_bot, sender, message, *args):
try:
if sender and (sender != BOT_USERNAME):
_bot.chat(f'heard - {message}')
logger.info(f'heard - {message}')
if 'come' in message:
player = bot.players[sender]
target = player.entity
if not target:
bot.chat("can't see target")
return
pos = target.position
current_bot_tasks.extend([{
"function": bot_functions.go_to_location,
"arguments": {"location": pos}
}])
bot_functions.go_to_location(bot, pos, 1)
elif message == 'stop':
off(_bot, 'chat', handle_msg)
elif message == 'do_task':
do_task()
else:
_bot.chat("processing task...")
handle_user_request(_bot, message)
except Exception as e:
logger.exception("bot:chat")
raise e
@On(bot, 'move')
def handle_move(*args):
try:
def euclidean_distance_3d(point1, point2):
return math.sqrt((point2.x - point1.x) ** 2 + (point2.y - point1.y) ** 2 + (point2.z - point1.z) ** 2)
global last_location
move_threshold = 2
bot_location = bot.entity.position
if bot_location is not None:
distance_traveled = round(abs(euclidean_distance_3d(bot_location, last_location)))
if distance_traveled > move_threshold:
bot_functions.observe_local_blocks(bot)
last_location = bot.entity.position
except Exception as e:
logger.exception("bot:move")
raise e
@On(bot, 'goal_reached')
def handle_goal_reached(*args):
bot.chat(f"goal reached.")
try:
if len(current_bot_tasks) > 0:
current_task = current_bot_tasks[0] # get the current task/function
logger.debug(f"handle_goal_reached : current_task={current_task['function'].__name__}")
logger.debug(f"current_task['function'].__name__ = {current_task['function'].__name__}")
logger.debug(f"bot_functions.go_to_location.__name__ = {bot_functions.go_to_location.__name__}")
# if the current task is go_to_location
if current_task['function'].__name__ == bot_functions.go_to_location.__name__:
current_bot_tasks.pop(0) # goal was reached so remove got_to_location from list
logger.debug(f"pop len(current_bot_tasks)={len(current_bot_tasks)}")
else:
logger.debug("mismatch")
do_task()
except Exception as e:
logger.exception("bot:goal_reached")
raise e
@On(bot, 'diggingCompleted')
def handle_digging_completed(*args):
logger.debug(f"handle_digging_completed start")
bot_functions.observe_local_blocks(bot) # update state
try:
if len(current_bot_tasks) > 0:
current_task = current_bot_tasks[0] # get the current task/function
logger.debug(f"handle_digging_completed : current_task={current_task['function'].__name__}")
bot.chat("digging completed.")
# if the current task is dig_block_by_location
if current_task['function'].__name__ == bot_functions.dig_block_by_location.__name__:
current_bot_tasks.pop(0) # dig_block_by_location done, remove from task list
logger.debug(f"pop len(current_bot_tasks)={len(current_bot_tasks)}")
do_task() # call do task
except Exception as e:
logger.exception("bot:handle_digging_completed")
raise e
@On(bot, 'diggingAborted')
def handle_digging_aborted(block):
logger.debug(f"handle_digging_aborted start")
try:
if len(current_bot_tasks) > 0:
current_task = current_bot_tasks[0] # get the current task/function
logger.debug(f"handle_digging_aborted : current_task={current_task}")
# if the current task is dig_block_by_location
if current_task['function'] == bot_functions.dig_block_by_location:
do_task() # call do task without removing current task to reattempt dig_block_by_location
except Exception as e:
logger.exception("bot:handle_digging_aborted")
raise e
@On(bot, 'playerCollect')
def handle_player_collect(_bot, collector, collected):
if collector.id == bot.entity.id:
bot.chat(f"collected item {collected.name}")
# logger.debug(f"handle_player_collect collector={collector} collected={collected}")
@On(bot, 'entitySpawn')
def handle_entity_spawn(_bot, entity):
pass
# logger.debug(f"handle_entity_spawn args={entity}")
@On(bot, 'itemDrop')
def handle_item_drop(_bot, entity):
if entity.position.distanceTo(_bot.entity.position) < 2:
logger.debug("scheduled item pickup")
current_bot_tasks.append({
"function": bot_functions.go_to_location,
"arguments": {
"location": Vec3(entity.position.x, entity.position.y, entity.position.z),
"distance_from": 0
}})
if len(current_bot_tasks) == 1: # our task is the only one
do_task()
def do_task():
if len(current_bot_tasks) > 0: # if there is another task
next_task = current_bot_tasks[0] # get next task
logger.debug(f"do_task : next_task['function']={next_task['function']}")
logger.debug(f"do_task : next_task['arguments'].keys()={next_task['arguments'].keys()}")
next_task['arguments']['bot'] = bot # add bot to arguments
next_task['function'](**next_task['arguments']) # call next task function
def handle_user_request(_bot, message):
bot_db = BotDB()
response, response_type = openai.query_functions_functions(bot_db, message)
if response_type == 'function_call':
if 'arguments' in response:
function_args = response['arguments']
function_args = json.loads(function_args)
else:
function_args = {}
logger.debug(f"handle_user_request: function={response['name']} args={function_args}")
if 'unused' in function_args: # remove unused param
del function_args['unused']
function_args['bot'] = _bot
function_args['bot_tasks'] = current_bot_tasks
function_call = response['name']
        method = getattr(bot_tasks, function_call, None)  # load the matching task function by name
        if method is None:
            logger.error(f"handle_user_request: unknown function requested: {function_call}")
            return
        method(**function_args)  # call the function with the arguments provided
| [
"heard - PLACEHOLDER",
"PLACEHOLDER spawned",
"digging completed.",
"goal reached.",
"processing task...",
"can't see target"
] |
2024-01-10 | pytorch/vision | torchvision~datasets~country211.py | from pathlib import Path
from typing import Callable, Optional
from .folder import ImageFolder
from .utils import download_and_extract_archive, verify_str_arg
class Country211(ImageFolder):
"""`The Country211 Data Set <https://github.com/openai/CLIP/blob/main/data/country211.md>`_ from OpenAI.
This dataset was built by filtering the images from the YFCC100m dataset
that have GPS coordinate corresponding to a ISO-3166 country code. The
dataset is balanced by sampling 150 train images, 50 validation images, and
100 test images for each country.
Args:
root (string): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"valid"`` and ``"test"``.
transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and puts it into
``root/country211/``. If dataset is already downloaded, it is not downloaded again.
"""
_URL = "https://openaipublic.azureedge.net/clip/data/country211.tgz"
_MD5 = "84988d7644798601126c29e9877aab6a"
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
self._split = verify_str_arg(split, "split", ("train", "valid", "test"))
root = Path(root).expanduser()
self.root = str(root)
self._base_folder = root / "country211"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
super().__init__(str(self._base_folder / self._split), transform=transform, target_transform=target_transform)
self.root = str(root)
def _check_exists(self) -> bool:
return self._base_folder.exists() and self._base_folder.is_dir()
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
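# --- Usage sketch (not part of the original file): downloads the archive on first use and behaves
# --- like any torchvision ImageFolder dataset, yielding (PIL image, class index) pairs.
if __name__ == "__main__":
    dataset = Country211(root="./data", split="train", download=True)
    image, label = dataset[0]
    print(len(dataset), dataset.classes[label])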
| [] |
2024-01-10 | Siddhartha90/The-Aubergine-index | sentimentAnalysis.py | import openai
import os, json
openai.api_key = os.environ.get("OPENAI_API_KEY")
def sentimentAnalysis(reviews, keyword):
# reviews = """
# 0: Loved it!! Awesome service. The food was so good that I didn't have time to take too many pictures. The service was impeccable and very attentive. It was overall a very din experience. Also you need to try the Greek coffee. I loved it so much that ended up having two cups.
# 1: Stunning restaurant, delicious food, great wine selection and incredible staff. I had dinner with some friends a few months ago. Even though we had a reservation it took some time to get us seated and the waiters more than made it up to us by offering outstanding service and a complimentary desert. Highly recommend. Perfect date night spot.
# """
print(reviews)
system_msg = "You are an assistant that only returns valid JSON, with no pretext or posttext. "
# keyword = "gluten free food"
# Define the user message
user_json_msg = f"You are answering questions on the following reviews```{reviews}```"
assistant_json_msg = f"[10]"
user_msg = f"Given this keyword ```{keyword}, Reply with how the related sentiment is for the given result. Use lateral thinking, for example, if it's implied all they sell is steak, that's probably gluten free"
response = openai.ChatCompletion.create(model="gpt-4",
temperature=0,
messages=[{"role": "system", "content": system_msg},
{"role": "user", "content": user_json_msg},
{"role": "assistant", "content": assistant_json_msg},
{"role": "user", "content": user_msg},
])
print(response)
sentiment = response.choices[0].message.content
# content = '"content": "{\n \"keyword\": \"gluten free food\",\n \"sentiment\": \"neutral\",\n \"explanation\": \"The reviews do not mention gluten free food specifically, but the overall sentiment is positive.\"\n}"'
print(type(sentiment))
# json_acceptable_string = content.replace("'", "\"")
sentimentJson = json.loads(sentiment)
print(type(sentimentJson))
# print("sentiment - " + sentimentJson["sentiment"])
# print("reason - " + sentimentJson["reason"])
return sentimentJson
# sentimentAnalysis()
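# --- Usage sketch (not part of the original file): the review text is made up, and a valid
# --- OPENAI_API_KEY environment variable is required for the GPT-4 call to succeed.
if __name__ == "__main__":
    demo_reviews = "0: Amazing steak house, they only serve steak and salads. Service was great."
    print(sentimentAnalysis(demo_reviews, "gluten free food"))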
| [
"You are answering questions on the following reviews```PLACEHOLDER```",
"Given this keyword ```PLACEHOLDER, Reply with how the related sentiment is for the given result. Use lateral thinking, for example, if it's implied all they sell is steak, that's probably gluten free",
"[10]",
"You are an assistant that only returns valid JSON, with no pretext or posttext. "
] |
2024-01-10 | pilievwm/desc | category.py | import json
import time
import openai
from collections import defaultdict
import requests
import validators
from helpers import *
import time
import re
import html
import textwrap
from bs4 import BeautifulSoup
from request_counter import count_requests, global_counter, get
from datetime import datetime
from flask_socketio import SocketIO, emit
from config import Config
import random
import query
from prompts import *
app_settings = {}
category_settings = {}
seo_settings = {}
stop_category_process = {}
formatted_now = None
now = None
def reset_stop():
print('Resetting the process...')
global stop_category_process
    stop_category_process = {}  # per-project stop flags; reset to an empty dict, not a bool
socketio = None
def set_socketio(sio):
global socketio
socketio = sio
#################### MAIN FUNCTIONS ####################
### GET CATEGORY INFO ###
def getCategoryInfo(app_settings):
headers = {
'X-CloudCart-ApiKey': app_settings['X-CloudCart-ApiKey'],
}
url = f"{app_settings['url']}/api/v2/categories?fields[categories]=parent_id,name,url_handle&page[size]=100"
if not validators.url(url):
raise Exception("The URL provided in 'app_settings' is not valid")
categories_by_id = {}
children_by_parent_id = defaultdict(list)
max_retries = 15
next_url = url
while next_url:
for attempt in range(max_retries):
try:
response = get(next_url, headers=headers)
if response.status_code != 200:
raise Exception(f"Request to {url} failed with status code {response.status_code}. The response was: {response.text}")
data = response.json()
for category in data['data']:
id = category['id']
parent_id = str(category['attributes']['parent_id']) # Convert parent_id to string here
categories_by_id[id] = category
if parent_id != 'None':
children_by_parent_id[parent_id].append(id)
next_url = data['links']['next'] + "&fields[categories]=parent_id,name,url_handle" if "next" in data["links"] else None
break
except Exception as e:
if attempt < max_retries - 1:
wait_time = 5 * (attempt + 1)
print(f"Error occured at CloudCart. Waiting for {wait_time} seconds before retrying.")
time.sleep(wait_time)
else:
raise
return categories_by_id, children_by_parent_id
### GET CATEGORY LEVELS ###
def getCategoryLevels(app_settings, category_id, categories_by_id, children_by_parent_id):
info = {'root_category': None, 'same_level_categories': [], 'sub_level_categories': []}
category = categories_by_id.get(str(category_id))
if category is None:
print(f"Category with ID {category_id} does not exist")
return info
root_category_id = str(category['attributes']['parent_id'])
if root_category_id != 'None':
root_category = categories_by_id.get(root_category_id, None)
if root_category is not None:
info['root_category'] = {'id': root_category['id'], 'name': root_category['attributes']['name'], 'url': app_settings['url'] + "/category/" + root_category['attributes']['url_handle']}
# same-level categories are those with the same parent as the target category
same_level_ids = children_by_parent_id.get(root_category_id, [])
else:
# if the target category is a root category, then all root categories are its same-level categories
same_level_ids = [cat_id for cat_id, cat in categories_by_id.items() if str(cat['attributes']['parent_id']) == 'None']
same_level_ids = [str(id) for id in same_level_ids]
info['same_level_categories'] = [{'id': categories_by_id[id]['id'], 'name': categories_by_id[id]['attributes']['name'], 'url': app_settings['url'] + "/category/" + categories_by_id[id]['attributes']['url_handle']} for id in same_level_ids if id != str(category['id'])]
sub_level_ids = children_by_parent_id.get(str(category['id']), [])
sub_level_ids = [str(id) for id in sub_level_ids]
info['sub_level_categories'] = [{'id': categories_by_id[id]['id'], 'name': categories_by_id[id]['attributes']['name'], 'url': app_settings['url'] + "/category/" + categories_by_id[id]['attributes']['url_handle']} for id in sub_level_ids]
return info
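# --- Usage sketch (not part of the original file): the store URL and API key are placeholders;
# --- getCategoryInfo() builds the two lookup tables that getCategoryLevels() needs.
if __name__ == "__main__":
    demo_settings = {"url": "https://example-store.com", "X-CloudCart-ApiKey": "<api key>"}
    cats_by_id, children_by_parent = getCategoryInfo(demo_settings)
    first_id = next(iter(cats_by_id))
    print(getCategoryLevels(demo_settings, first_id, cats_by_id, children_by_parent))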
### GET ORDERED PRODUCTS BY SALES ###
def getOrderedProductsbySales(category_id, app_settings, category_settings, target_category_info):
headers = {'X-CloudCart-ApiKey': f'{app_settings["X-CloudCart-ApiKey"]}'}
vendors = getVendors(app_settings)
vendor_mapping = {str(vendor['id']): {'name': vendor['attributes']['name'], 'url': app_settings['url'] + "/vendor/" + vendor['attributes']['url_handle']} for vendor in vendors}
ordered_products_url = f'{app_settings["url"]}/api/v2/order-products?filter[category_id]={category_id}&page[size]=100&sort=-order_id'
ordered_product_data = []
brand_count = {} # Initialize brand_count
for page_number in range(1, int(category_settings['max_order_pages'] + 1)):
page_url = f'{ordered_products_url}&page[number]={page_number}'
response = get(page_url, headers=headers)
data = response.json().get('data', [])
ordered_product_data += data
if not ordered_product_data:
print("No orders found for the category.")
return {}, []
product_order_count = {}
for product in ordered_product_data:
product_id = str(product['attributes']['product_id'])
product_order_count[product_id] = product_order_count.get(product_id, 0) + 1
sorted_products = sorted(product_order_count.items(), key=lambda x: x[1], reverse=True)
active_products_url = f'{app_settings["url"]}/api/v2/products?filter[category_id]={category_id}&page[size]=100&filter[active]=yes'
active_product_data = []
page_number = 1
#while category_settings['only_active_products']:
while True:
page_url = f'{active_products_url}&page[number]={page_number}'
response = get(page_url, headers=headers)
data = response.json().get('data', [])
active_product_data += data
next_page_url = response.json().get('links', {}).get('next')
if next_page_url is None or not data:
break
page_number += 1
if not active_product_data:
print("No active products found in the category.")
return {}, []
active_product_ids = {str(product['id']) for product in active_product_data}
# Assigning keys based on 'only_active'
best_sellers = [product for product in active_product_data if str(product['id']) in active_product_ids]
price_key = 'price_from'
url_handle_key = 'url_handle'
product_id_key = 'id' # In this case 'id' is used for product_id
'''
if category_settings['only_active_products']:
# For active products
best_sellers = [product for product in active_product_data if str(product['id']) in active_product_ids]
price_key = 'price_from'
url_handle_key = 'url_handle'
product_id_key = 'id' # In this case 'id' is used for product_id
else:
# For ordered products
best_sellers = [product for product in ordered_product_data if str(product['attributes']['product_id']) in (product_id for product_id, count in sorted_products)]
price_key = 'order_price'
url_handle_key = '' # Since 'url_handle' is not available in ordered_product_data
product_id_key = 'product_id' # In this case 'product_id' is used
'''
if best_sellers:
prices = [product['attributes'][price_key] / 100 for product in best_sellers if product['attributes'][price_key] is not None]
if prices:
lowest_price = round(min(prices), 2)
highest_price = round(max(prices), 2)
else:
lowest_price = 0.00
highest_price = 0.00
else:
print("No active best sellers found.")
return {}, []
price_range = round((highest_price - lowest_price) / 3, 2)
products_by_sales = {
"entry_level_products": [],
"mid_size_products": [],
"hi_end_products": []
}
for product in best_sellers:
product_id = str(product[product_id_key])
'''
if category_settings['only_active_products']:
product_id = str(product[product_id_key]) # Use 'id' for active products
else:
product_id = str(product['attributes'][product_id_key]) # Use 'product_id' for ordered products
'''
price = product['attributes'][price_key] / 100 if product['attributes'][price_key] is not None else 0
brand_info = vendor_mapping.get(str(product['attributes']['vendor_id']), {'name': str(product['attributes']['vendor_id']), 'url': ''})
brand_name = brand_info['name']
brand_count[brand_name] = brand_count.get(brand_name, 0) + 1
product_url = app_settings['url'] + "/product/" + (product['attributes'][url_handle_key] if url_handle_key else '')
# extract images from the response.
image_id = product['attributes'].get('image_id', None)
# get the image url from this endpoint /api/v2/images/237
if image_id:
image_url = fetch_image_url(app_settings['url'], image_id, headers)
else:
image_url = None
product_info = {
"name": product['attributes']['name'],
"orders": product_order_count.get(product_id, 0),
"price": "{:.2f}".format(price),
"brand_name": brand_name,
"product_url": product_url, # added product_url
"brand_url": brand_info['url'], # added brand_url
"image_url": image_url
}
if price <= lowest_price + price_range:
products_by_sales["entry_level_products"].append(product_info)
elif price <= lowest_price + 2 * price_range:
products_by_sales["mid_size_products"].append(product_info)
else:
products_by_sales["hi_end_products"].append(product_info)
sales_limit = category_settings['add_best_selling_products'] if category_settings.get('include_sales_info', False) else 0
faq_limit = category_settings['add_best_selling_products_faq'] if category_settings.get('include_faq_info', False) else 0
max_products_limit = max(sales_limit, faq_limit)
for category in products_by_sales:
products_by_sales[category] = sorted(products_by_sales[category], key=lambda x: x["orders"], reverse=True)[:max_products_limit]
sales_limit_brands = category_settings['add_top_brands'] if category_settings.get('include_sales_info', False) else 0
faq_limit_brands = category_settings['add_top_brands_faq'] if category_settings.get('include_faq_info', False) else 0
max_brands_limit = max(sales_limit_brands, faq_limit_brands)
best_selling_brands = sorted(brand_count.items(), key=lambda x: x[1], reverse=True)[:max_brands_limit]
best_selling_brands = [
{
"Brand name": brand,
"Orders": count,
"url": target_category_info['url_handle'] + "?vendors=" + next((v['url'].split("/vendor/")[-1] for k, v in vendor_mapping.items() if v['name'] == brand), '')
}
for brand, count in best_selling_brands
]
return products_by_sales, best_selling_brands
### GET CATEGORY DETAILS ###
def getCategoryDetails(app_settings, fields=[]):
headers = {
'X-CloudCart-ApiKey': app_settings['X-CloudCart-ApiKey'],
}
# Convert the fields list to a comma-separated string
field_string = ','.join(fields)
# Construct the URL based on the presence of fields
base_url = f"{app_settings['url']}/api/v2/categories?page[size]=100"
url = f"{base_url}&fields[categories]={field_string}" if fields else base_url
max_retries = 15
next_url = url
categories_data = []
while next_url:
for attempt in range(max_retries):
try:
if not validators.url(next_url):
raise Exception("The URL provided is not valid")
response = get(next_url, headers=headers)
if response.status_code != 200:
raise Exception(f"Request to {url} failed with status code {response.status_code}. The response was: {response.text}")
data = response.json()
for item in data.get('data', []):
category_data = {}
category_data['id'] = item.get('id')
# Fetching other attributes if they exist inside the 'attributes' dictionary
attributes = item.get('attributes', {})
for field in fields:
if field != 'id': # We've already fetched the ID
category_data[field] = attributes.get(field)
categories_data.append(category_data)
next_url = data['links']['next'] if "next" in data["links"] else None
break
except Exception as e:
if attempt < max_retries - 1:
wait_time = 5 * (attempt + 1)
print(f"Error occurred. Waiting for {wait_time} seconds before retrying.")
time.sleep(wait_time)
else:
raise
return categories_data
#################### HELPERS ####################
### GET VENDOR DETAILS ###
def getVendorDetails(vendor_id, app_settings):
headers = {
'X-CloudCart-ApiKey': app_settings['X-CloudCart-ApiKey'],
}
# Build the base URL
url = f"{app_settings['url']}/api/v2/vendors/{vendor_id}?page_size=100"
if not validators.url(url):
raise Exception("The URL provided is not valid")
# Send the request
response = get(url, headers=headers)
if response.status_code != 200:
raise Exception(f"Request to {url} failed with status code {response.status_code}. The response was: {response.text}")
vendor_details = response.json().get('data', None)
return vendor_details
### GET ALL VENDORS ###
def getVendors(app_settings):
headers = {
'X-CloudCart-ApiKey': app_settings['X-CloudCart-ApiKey'],
}
url = f"{app_settings['url']}/api/v2/vendors?fields[vendors]=name,url_handle&page[size]=100"
max_retries = 15
next_url = url
vendors = []
while next_url:
for attempt in range(max_retries):
try:
if not validators.url(next_url):
raise Exception("The URL provided is not valid")
response = get(next_url, headers=headers)
if response.status_code != 200:
raise Exception(f"Request to {url} failed with status code {response.status_code}. The response was: {response.text}")
data = response.json()
vendors.extend(data.get('data', []))
next_url = data['links']['next'] if "next" in data["links"] else None
break
except Exception as e:
if attempt < max_retries - 1:
wait_time = 5 * (attempt + 1)
print(f"Error occurred. Waiting for {wait_time} seconds before retrying.")
time.sleep(wait_time)
else:
raise
return vendors
### GET TARGET CATEGORY INFO ###
def getTargetCategoryInfo(db, Processed_category, app_settings, category_id, category_settings, seo_settings, project_id, include_description=False):
now = datetime.now()
formatted_now = now.strftime("%d/%m/%Y %H:%M:%S")
headers = {
'X-CloudCart-ApiKey': app_settings['X-CloudCart-ApiKey'],
}
url = f"{app_settings['url']}/api/v2/categories/{category_id}?fields[categories]=name,description,url_handle,properties&include=properties"
max_retries = 1
properties = None
for attempt in range(max_retries):
try:
response = get(url, headers=headers)
if response.status_code != 200:
raise Exception(f"Request to {url} failed with status code {response.status_code}. The response was: {response.text}")
# Get the data from the response
data = response.json().get('data', {})
url = f"{app_settings['url']}/category/{data['attributes'].get('url_handle')}"
category_name = data['attributes'].get('name')
if category_settings['include_properties'] or category_settings['include_properties_faq']:
# Get links from a page by calling the function get_links_from_page only if the category contains properties
if data['relationships'].get('properties'):
Config.socketio.emit('log', {'data': f'{formatted_now}: Getting links from {url}'},room=str(project_id), namespace='/')
properties = get_links_from_page(url, app_settings, category_settings, category_name, seo_settings, project_id)
description = ''
# compare the description_lenght with the number of characters
# get the description, remove HTML tags
raw_description = re.sub('<[^<]+?>', '', html.unescape(data['attributes'].get('description')))
# check
if category_settings.get('description_length') != 0 and category_settings.get('description_length') < len(raw_description):
Config.socketio.emit('log', {'data': f'{formatted_now}: The description of the category is longer than the threshold target. The process will end...'}, room=str(project_id), namespace='/')
raise Exception(f"The description of the category is {len(raw_description)} characters which is longer than the threshold {category_settings.get('description_length')} target. The process will end...")
# Save the processed category
# This is information about all fields from the database
# project_id, category_id, category_structure, category_name, category_prompt, category_description, category_faqs, category_keywords, category_custom_keywords
query.save_processed_category(db, Processed_category, project_id, category_id, category_name=category_name.lower(), category_url=url)
# Get description_length. If it's missing or not positive, use a default value (e.g., 100).
description_length = category_settings.get('description_length')
if not description_length or description_length <= 0:
                description_length = 10  # default truncation width in characters; adjust as needed
# truncate to the end of the sentence at or before description_length characters
truncated_description = textwrap.shorten(raw_description, width=description_length, placeholder="...")
if include_description:
description = data['attributes'].get('description'),
# format the output
target_category_info = {
"name": data['attributes'].get('name'),
"id": data.get('id'),
"url_handle": url,
"description": description,
# add condition to check if the category contains properties
"properties": properties if properties else []
}
# If request was successful, break out of the loop and return category info
return target_category_info
except Exception as e:
if attempt < max_retries - 1:
wait_time = 5 * (attempt + 1)
Config.socketio.emit('log', {'data': f'{formatted_now}: Error occurred. Waiting for {wait_time} seconds before retrying.'},room=str(project_id), namespace='/')
time.sleep(wait_time)
else:
raise
### SCRAPE PROPERTIES FROM THE CATEGORY PAGE ###
def get_links_from_page(url, app_settings, category_settings, category_name, seo_settings, project_id):
now = datetime.now()
formatted_now = now.strftime("%d/%m/%Y %H:%M:%S")
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
}
Config.socketio.emit('log', {'data': f'{formatted_now}: Getting properties from {url}'},room=str(project_id), namespace='/')
response = get(url, headers=headers)
soup = BeautifulSoup(response.text, 'html.parser')
# List of possible selectors
selectors = [
'._filter-category-property',
'._filter-category-properties ._filter-category-property',
'._filter-category-property .js-section-title js-filter-category-property-toggle active',
'._filter-category-property'
]
# Try each selector until we find results
for selector in selectors:
property_blocks = soup.select(selector)
if property_blocks:
break
results = {}
prop_count = 0 # Counter for properties
max_props = int(category_settings.get('max_props', 0)) # Maximum number of properties
for block in property_blocks:
if prop_count >= max_props:
break # Stop the loop if we have reached the maximum number of properties
property_title = block.select_one('._filter-category-property-title h5').text
form_rows = block.select('._form-row')
count = 0
if property_title not in results:
results[property_title] = []
prop_count += 1 # Increment the property counter only when a new property is encountered
else:
continue # Skip to the next block if the property is already in results
if category_settings['include_properties'] or category_settings['include_properties_faq']:
for row in form_rows:
if count < int(category_settings.get('max_property_values', 0)) or count < int(category_settings.get('max_property_values_faq', 0)):
input_tag = row.select_one('input.category-property-filter')
label = row.select_one('label._checkbox')
property_name = input_tag.get('data-property-name') if input_tag else None
value = input_tag.get('value') if input_tag else None
value_title = label.text.strip() if label else ''
if property_name and value:
new_url = f'{url}?{property_name}={value}'
results[property_title].append([value_title, new_url])
count += 1
Config.socketio.emit('log', {'data': f'{formatted_now}: Found properties...'},room=str(project_id), namespace='/')
if results:
if seo_settings.get('generate_keywords'):
keywords = craft_keywords_urls(app_settings, category_settings, seo_settings, category_name, results, project_id)
keywords = keywords.strip()
return json.loads(keywords)
else:
return results
### GET CATEGORY IDS only ###
def getCategoryIds(app_settings):
headers = {
'X-CloudCart-ApiKey': app_settings['X-CloudCart-ApiKey'],
}
url = f"{app_settings['url']}/api/v2/categories?fields[categories]=id&page[size]=100"
max_retries = 15
next_url = url
category_ids = []
while next_url:
for attempt in range(max_retries):
try:
if not validators.url(next_url):
raise Exception("The URL provided is not valid")
response = get(next_url, headers=headers)
if response.status_code != 200:
raise Exception(f"Request to {url} failed with status code {response.status_code}. The response was: {response.text}")
data = response.json()
category_ids.extend([category.get('id') for category in data.get('data', [])])
next_url = data['links']['next'] if "next" in data["links"] else None
break
except Exception as e:
if attempt < max_retries - 1:
wait_time = 5 * (attempt + 1)
print(f"Error occurred. Waiting for {wait_time} seconds before retrying.")
time.sleep(wait_time)
else:
raise
return category_ids
#################### OPENAI FUNCTIONS ####################
### CRAFT KEYWORDS FOR EACH PROPERTY VALUE ###
def craft_keywords_urls(app_settings, category_settings, seo_settings, category_name, results, project_id):
now = datetime.now()
formatted_now = now.strftime("%d/%m/%Y %H:%M:%S")
openai.api_key = app_settings['openai_key']
if results is None:
Config.socketio.emit('log', {'data': f'{formatted_now}: No properties found for the category'},room=str(project_id), namespace='/')
return
json_structure_example = {"season": [["summer", "https://shop.mdl.bg/category/bodi-women?y=1"], ["winter", "https://shop.mdl.bg/category/bodi-women?y=2"]]}
json_structure_example_str = str(json_structure_example)
# Modify the prompt to ask for keywords for the specific property value
prompt = (f"As a {app_settings['language']} SEO researcher working for {category_name} category. Craft maximum of {seo_settings['max_keywords']} SEO-optimized keywords for each property name and its values. Use this {category_name} category name for a context when crafting the keywords and also \"price\" or \"affordable price\". This is an example of the array: {json_structure_example_str} where \"season\" is the name of the property group, \"summer\" is the value of the property and the link is the link to the value. To craft a keyword, use the property name \"season\" as a context and use each values \"summer\" and \"winter\" to generate keywords. For example, use the property name as a context to generate 2 keywords for the value \"summer\" and 2 keywords for the value \"winter\". *** This is the actual information that you need to use to generate keywords: {results} ***")
#prompt = (f"As a Bulgarian SEO researcher, craft at least {category_settings['max_keywords']} SEO optimized keywords for the property '{property_name}' with value '{value}' in the category '{category_name}'. ***Do not include only the property value, but also additional related keywords.***")
if app_settings['print_prompt']:
Config.socketio.emit('log', {'data': f'{formatted_now}: Prompt for keywords generation:\n{prompt}'},room=str(project_id), namespace='/')
return(prompt)
system_prompt = (
'The output must be strictly valid JSON structure like this example: '
'{"y":[{"id":"1","url":"https://shop.mdl.bg/category/bodi-women?y=1","keywords":["keyword1","keyword 2"]},'
'{"id":"2","url":"https://shop.mdl.bg/category/bodi-women?y=2","keywords":["keyword1","keyword 2"]}]}'
f'*** The output must be ONLY the JSON structure explicitly and you should keep the {app_settings["language"]} language from the prompt!***'
)
max_retries = 15
Config.socketio.emit('log', {'data': f'{formatted_now}: Generating keywords...'},room=str(project_id), namespace='/')
for attempt in range(max_retries):
try:
response = openai.ChatCompletion.create(
model=app_settings['seo_model'],
messages=[
{"role": "user", "content": prompt},
{"role": "system", "content": system_prompt},
],
temperature=app_settings['temperature'],
)
# If the request was successful, break out of the loop
break
except openai.error.AuthenticationError:
# Handle authentication errors (e.g., invalid API key)
Config.socketio.emit('log', {'data': 'Authentication Error. Check your OpenAI API Key!'}, room=str(1), namespace='/')
break
except (openai.error.APIError, openai.error.Timeout, openai.error.ServiceUnavailableError) as e:
# Handle APIError, Timeout, and ServiceUnavailableError for retry
wait_time = 2 * (attempt + 1)
Config.socketio.emit('log', {'data': f'{formatted_now}: Encountered an issue: {e.error}. Waiting for {wait_time} seconds before retrying.'},room=str(project_id), namespace='/')
time.sleep(wait_time)
except Exception as e:
# Handle all other exceptions without retrying
Config.socketio.emit('log', {'data': f'{formatted_now}: {e}'},room=str(project_id), namespace='/')
break
else:
raise Exception("Maximum number of retries exceeded.")
answer = response['choices'][0]['message']['content']
results = answer
return results
### CRAFT FAQS ###
def craft_faqs(db, Processed_category, Category_Settings, app_settings, category_settings, seo_settings, results, project_id):
now = datetime.now()
formatted_now = now.strftime("%d/%m/%Y %H:%M:%S")
Config.socketio.emit('log', {'data': f'{formatted_now}: Crafting FAQs...'},room=str(project_id), namespace='/')
### STOP PROCESS IF USER CLICKS STOP ###
if stop_category_process.get(project_id, False):
stop_category(project_id) # Stop process
Config.socketio.emit('log', {'data': f'{formatted_now}: Process stopped by user.'},room=str(project_id), namespace='/')
        category_id = results['target_category_info']['id']  # needed here; the normal flow only sets it further below
        project = Category_Settings.query.filter_by(project_id=project_id, category_id=category_id).first()
if project:
project.in_progress = False
db.session.commit()
return
openai.api_key = app_settings['openai_key']
# Extract target_category_info
target_category_info = results['target_category_info']
target_category_name = target_category_info['name']
target_category_url = target_category_info['url_handle']
target_category_properties = target_category_info['properties']
prop_output = ''
if category_settings['include_properties_faq']:
if isinstance(target_category_properties, dict):
for i, (prop, values) in enumerate(target_category_properties.items()):
# If we have already processed the maximum number of properties, break the loop
if i >= category_settings["max_property_values_faq"]:
break
prop_output += f'Property name "{prop}", '
for val in values:
# Check the type of val
if isinstance(val, list):
id_ = val[0]
url_ = val[1] # assuming the second item in the list is always a URL
prop_output += f'Value {id_}, Link: {url_}, '
elif isinstance(val, dict):
id_ = val["id"]
url_ = val["url"]
keywords = val["keywords"]
prop_output += f'Keywords: {", ".join(keywords)}, Links: {url_}, '
elif isinstance(target_category_properties, list):
# handle the list case here, for example:
for prop in target_category_properties:
prop_output += f'Property: {prop}, '
else:
raise ValueError("Unexpected type for target_category_properties")
# Extract category_info
category_info = results.get('category_info')
if category_info is None:
top_level_category = []
same_level_categories = []
sub_level_categories = []
else:
# Same level categories information
top_level_category_info = category_info.get('root_category')
if top_level_category_info and 'name' in top_level_category_info and 'url' in top_level_category_info:
top_level_category = [(top_level_category_info['name'], top_level_category_info['url'])]
else:
top_level_category = []
same_level_categories_info = category_info.get('same_level_categories', [])
same_level_categories = [(cat['name'], cat['url']) for cat in same_level_categories_info
if cat and 'name' in cat and 'url' in cat]
# Sub level categories information
sub_level_categories_info = category_info.get('sub_level_categories', [])
sub_level_categories = [(cat['name'], cat['url']) for cat in sub_level_categories_info
if cat and 'name' in cat and 'url' in cat]
if category_settings['include_faq_info']:
# Extract product level information
products_info = results.get('products_by_sales', {})
entry_level_products = []
mid_size_products = []
hi_end_products = []
if 'entry_level_products' in products_info:
entry_level_products = [(prod['name'], prod['product_url'], prod['price'])
for prod in products_info['entry_level_products']
if 'name' in prod and 'product_url' in prod and 'price' in prod]
if 'mid_size_products' in products_info:
mid_size_products = [(prod['name'], prod['product_url'], prod['price'])
for prod in products_info['mid_size_products']
if 'name' in prod and 'product_url' in prod and 'price' in prod]
if 'hi_end_products' in products_info:
hi_end_products = [(prod['name'], prod['product_url'], prod['price'])
for prod in products_info['hi_end_products']
if 'name' in prod and 'product_url' in prod and 'price' in prod]
top_brands = []
if category_settings['add_top_brands_faq']:
# Extract top brands information
top_brands = [(brand['Brand name'], brand['url']) for brand in results.get('best_selling_brands', [])]
faq_schema = '<div itemscope itemprop="mainEntity" itemtype="https://schema.org/Question"><h3 itemprop="name"><strong> ***QUESTION-PLACEHOLDER*** </strong></h3><p itemscope itemprop="acceptedAnswer" itemtype="https://schema.org/Answer"><span itemprop="text"> ***ANSWER-PLACEHOLDER*** </span></p></div>'
### PROMPTS ###
if category_settings['faq_use_schema']:
sys_prompt = f'Craft an FAQs of the category in this strictly and valid format: {faq_schema}. The answer for each FAQ must be put at ***ANSWER-PLACEHOLDER***! Additionally: The text at the ***ANSWER-PLACEHOLDER*** must be with appropriate HTML tags to improve its structure. For new lines use "br", for bold: "strong". When you have listings use "ul" and "li" or "ol" for numbers, while preserving the existing tags. Emphasize headings, subheadings, and key points by new lines, and ensure the content flows coherently. DO NOT MENTION ANYTHING FROM THE PROMPT IN ANY CASE!'
else:
sys_prompt = f'Craft an FAQs of the category. It have to be written in {app_settings["language"]}. For headings use H3. Make the text readable and for each text block you must add <p> tag. For new lines use "br", for bold: "strong". When you have listings use "ul" and "li" or "ol" for numbers, while preserving the existing tags. Emphasize headings, subheadings, and key points by new lines, and ensure the content flows coherently. DO NOT MENTION ANYTHING FROM THE PROMPT IN ANY CASE!'
system_prompt = (
sys_prompt
)
#### KEYWRDS RESEARCH ####
Config.socketio.emit('log', {'data': f'{formatted_now}: Searching Google for related searches for {target_category_name}'},room=str(project_id), namespace='/')
cluster_keywords_dict = keywords_one_level(db, Category_Settings, app_settings, main_query=target_category_name, project_id=project_id)
# Get the keywords from cluster_keywords_dict and convert them into a list
cluster_keywords_list = cluster_keywords_dict.get(target_category_name, '').split(',')
# append category_settings['use_main_keywords'] into the main_query list including the keywords from cluster_keywords_list
if category_settings['use_main_keywords']:
# Convert the string of keywords into a list
main_keywords_list = category_settings['use_main_keywords'].split(",") # assuming keywords are comma-separated
cluster_keywords_list.extend(main_keywords_list)
# Remove empty strings from the list and deduplicate
cluster_keywords_list = list(set([keyword.strip() for keyword in cluster_keywords_list if keyword.strip()]))
# Convert the list back to a comma-separated string and update the dictionary
cluster_keywords_dict[target_category_name] = ','.join(cluster_keywords_list)
cluster_keywords = cluster_keywords_dict
category_id = target_category_info['id']
### Unique keywords ###
# Extracting the keys and joining them with commas
top_keywords = ', '.join(cluster_keywords.values())
# Creating the second string
key_value_string = ', '.join([f"{key}, {value}" for key, value in cluster_keywords.items()])
keywords_list = key_value_string.split(', ')
unique_keywords_list = list(set(keywords_list))
unique_keywords_string = ', '.join(unique_keywords_list)
# Introduce faq_count
faq_count = category_settings['add_faq']
# Initialize an empty list to store each individual FAQ
all_faqs = []
finished_faqs = []
# Surround FAQ generation with the desired div
faq_wrapper_start = '<div itemscope itemtype="https://schema.org/FAQPage">'
faq_wrapper_end = '</div>'
# Add counter for current iteration
iteration_counter = 0
# Extract all keywords from the dictionary's values outside the loop
additional_questions = category_settings['additional_instructions_faq']
all_keywords = ','.join(cluster_keywords.values()).split(',')
if additional_questions:
all_keywords += additional_questions.split(',')
num_keywords = len(all_keywords)
Config.socketio.emit('log', {'data': f'{formatted_now}: Preparation for FAQs is ready. {num_keywords} FAQs will be generated'},room=str(project_id), namespace='/')
# Ensure the loop doesn't exceed the number of keywords available
loop_limit = num_keywords
all_keyphrases = []
for i in range(loop_limit):
### STOP PROCESS IF USER CLICKS STOP ###
if stop_category_process.get(project_id, False):
stop_category(project_id) # Stop process
Config.socketio.emit('log', {'data': f'{formatted_now}: Process stopped by user.'}, room=str(project_id), namespace='/')
project = Category_Settings.query.filter_by(project_id=project_id, category_id=category_id).first()
if project:
project.in_progress = False
db.session.commit()
return
iteration_counter += 1 # Increment the counter
Config.socketio.emit('log', {'data': f'{formatted_now}: Working on FAQ #{iteration_counter}'}, room=str(project_id), namespace='/')
# Directly set the current_keyword using the index i
current_keyword = all_keywords[i]
# Call the function to get related searches for the current_keyword
Config.socketio.emit('log', {'data': f'{formatted_now}: Searching Google for related searches for {current_keyword}'},room=str(project_id), namespace='/')
keyphrases_dict = keywords_one_level(db, Category_Settings, app_settings, main_query=current_keyword, project_id=project_id)
# Extract keywords from the dictionary's values and create a list
keyphrases_list = ','.join(keyphrases_dict.values()).split(',')
all_keyphrases.append(keyphrases_list)
# Here, you can continue with the rest of your logic using current_keyword and keyphrases_list
prompt = ''
### INTRO ###
prompt += f"*** GENERAL FAQ INSTRUCTIONS: ***\n"
prompt += f"I want you to act as a proficient SEO content writer for FAQs in {app_settings['language']}"
prompt += f"Each answer MUST contains minimum {category_settings['faq_length']} words.\n"
prompt += f"Craft a 100% unique, SEO-optimized question and answer for the FAQ section in {app_settings['language']}. "
### INSTRUCTIONS FOR QUESTIONS ###
prompt += f"INSTRUCTIONS FOR CRAFTING QUESTIONS:\n"
prompt += f"This is question #{iteration_counter}. Craft a question from this keyword: '{current_keyword}' for the category: '{target_category_name}' with H3 tag.\n"
if category_settings["include_category_info_faq"]:
temp_prompt = ""
# Check if same_level_categories has content
if same_level_categories:
temp_prompt += f"Craft questions related with the same level categories: {same_level_categories} "
# Check if sub_level_categories is not empty
if sub_level_categories:
temp_prompt += f"and sub-categories with their links to their category page: {sub_level_categories} "
if temp_prompt: # Only add the keyword instruction if temp_prompt has content
temp_prompt += f"as an SEO expert with a focus on keyword stemming, you must derive and choose the most appropriate stemmed variations from the keywords provided in this list: '{keyphrases_list}'. Understand the core meaning and concept of each keyword, and then incorporate these stemmed variations intuitively and naturally across the entire text.\n"
# Append or concatenate temp_prompt to the main prompt
prompt += temp_prompt
### INSTRUCTIONS FOR ANSWERS ###
prompt += f"*** INSTRUCTIONS FOR CRAFTING ANSWERS: ***\n"
# Unique keywords
#prompt += f"To craft an answer, it is important and mandatory to use the following keyphrases: '{keyphrases_list}'."
prompt += f"As an SEO expert it is important and mandatory to focus on keyword stemming, you must derive and choose the most appropriate stemmed variations from the keywords provided in this list: '{keyphrases_list}'. Understand the core meaning and concept of each keyword, and then incorporate these stemmed variations intuitively and naturally across the entire text."
prompt += f"This keywords are related to the main keyword {current_keyword} that the question is crafted from. You must include it as well!\n"
if prop_output:
prompt += f"in combination with this category properties: {prop_output}. It is important to communicate the primary features and functions that these products typically have in combination with the keyphrases provided. \n"
prompt += f"It is important to make sure you bold added keyphrases for more visibility. That way your FAQs will be SEO optimized.\n"
# Products information
#if products_info:
if category_settings['include_faq_info']:
prompt += f"When the question is related with sales, pricing, how cheap the good is or anything related. The key is to craft a pre sales or post sales question that will help customers to make an informed decision based on their specific needs and preferences. "
product_info = []
# Check if entry_level_products has content
if entry_level_products:
product_info.append(f"For entry level products: {entry_level_products}")
# Check if mid_size_products has content
if mid_size_products:
product_info.append(f"middle size products: {mid_size_products}")
# Check if hi_end_products has content
if hi_end_products:
product_info.append(f"and the high-end (flagman) products: {hi_end_products}")
# If there's product information available, construct the prompt
if product_info:
temp_prompt = "Craft a question related with the best products or pricing ranges and use the following information for product levels to highlight the best products for each category level. "
# Join the product information together with commas and spaces
product_info_str = ', '.join(product_info)
# Append or concatenate product_info_str to the temp prompt
temp_prompt += product_info_str + ". *** DO NOT MENTION EXACT PRICES ***\n"
# Append or concatenate temp_prompt to the main prompt
prompt += temp_prompt
prompt += f"Format the list of the products in visible and readable way. Make sure to link the products.\n"
# Top brands information
has_top_brands_info = (isinstance(top_brands, list) and len(top_brands) > 0) or \
(isinstance(top_brands, str) and top_brands.strip() != "")
if has_top_brands_info:
if category_settings["add_top_brands_faq"] and not category_settings['faq_top_brands_links']:
prompt += f"Highlighting the Top Brands at the FAQs: {top_brands}, mentioning unique selling points of each in few lines. Do not link each brand. Do not use the URLs.\n\n"
elif category_settings["add_top_brands_faq"] and category_settings['faq_top_brands_links']:
prompt += f"7. Highlighting the Top Brands at the FAQs: {top_brands}, mentioning unique selling points of each in few lines. Make a link of each brand.\n"
# Categpry related information
if category_settings["include_category_info_faq"]:
temp_prompt = ""
# Check if same_level_categories has content
if same_level_categories:
temp_prompt += f"At your answer add information related with the same level categories: {same_level_categories} "
# Check if sub_level_categories is not empty
if sub_level_categories:
temp_prompt += f"and sub-categories: {sub_level_categories} by adding links to their category page"
if temp_prompt: # Only add the keyword instruction if temp_prompt has content
temp_prompt += f"focus on keyword stemming, you must derive and choose the most appropriate stemmed variations from the keywords provided in this list: '{current_keyword}, {keyphrases_list}'. . Understand the core meaning and concept of each keyword, and then incorporate these stemmed variations intuitively and naturally across the entire FAQ.\n"
# Append or concatenate temp_prompt to the main prompt
prompt += temp_prompt
### GENERAL CONCLUSION INSTRUCTIONS ###
prompt += f"*** FINAL INSTRUCTIONS: ***\n"
prompt += f"Each Question and Answer should be written in your own words, without copying from other sources and must use provided keyphrases in {app_settings['language']}\n"
prompt += f"Utilize an informal tone, personal pronouns, active voice, rhetorical questions, and incorporate analogies and metaphors. Keep the text simple, brief and very well formated.\n"
#prompt += "IMPORTANT!: YOU MUST to format the given answer text with appropriate HTML tags to make it well-organized and visually appealing. IT IS A MUST"
prompt += "DO NOT ADD OR REFFER TO ANY WORDS OF THE THIS PROMPT! ALL OF THE INFORMATION IS FOR BUILDING THE PERFECT FAQ section! DO NOT MENTION ANYTHING FROM THE PROMPT IN ANY CASE!\n"
if app_settings['print_prompt']:
Config.socketio.emit('log', {'data': f'{formatted_now}: FAQ System prompt:\n{system_prompt}'},room=str(project_id), namespace='/')
Config.socketio.emit('log', {'data': f'{formatted_now}: FAQ Prompt:\n{prompt}'},room=str(project_id), namespace='/')
return
### STOP PROCESS IF USER CLICKS STOP ###
if stop_category_process.get(project_id, False):
stop_category(project_id) # Stop process
Config.socketio.emit('log', {'data': f'{formatted_now}: Process stopped by user.'},room=str(project_id), namespace='/')
project = Category_Settings.query.filter_by(project_id=project_id, category_id=category_id).first()
if project:
project.in_progress = False
db.session.commit()
return
Config.socketio.emit('log', {'data': f'{formatted_now}: Content creation for FAQ #{iteration_counter}'},room=str(project_id), namespace='/')
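    # Up to 15 attempts with linear backoff: a successful completion or a
    # non-retryable error (e.g. an invalid API key) breaks out early; the
    # for/else only raises when every attempt failed with a retryable API error.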
for attempt in range(15):
try:
response = openai.ChatCompletion.create(
model=app_settings['model'],
messages=[
{"role": "user", "content": prompt},
{"role": "system", "content": system_prompt},
],
temperature=0,
)
# If the request was successful, append FAQ to all_faqs and break out of the loop
all_faqs.append(response['choices'][0]['message']['content'])
break
except openai.error.AuthenticationError:
# Handle authentication errors (e.g., invalid API key)
Config.socketio.emit('log', {'data': 'Authentication Error. Check your OpenAI API Key!'}, room=str(1), namespace='/')
break
except (openai.error.APIError, openai.error.Timeout, openai.error.ServiceUnavailableError) as e:
# Handle APIError, Timeout, and ServiceUnavailableError for retry
wait_time = 2 * (attempt + 1)
Config.socketio.emit('log', {'data': f'{formatted_now}: Encountered an issue: {e.error}. Waiting for {wait_time} seconds before retrying.'},room=str(project_id), namespace='/')
time.sleep(wait_time)
except Exception as e:
# Handle all other exceptions without retrying
Config.socketio.emit('log', {'data': f'{formatted_now}: {e}'},room=str(project_id), namespace='/')
break
else:
raise Exception("Maximum number of retries exceeded.")
#### KEYWORDS CLEANUP ####
all_faq_keywords = [item for sublist in all_keyphrases for item in sublist] + all_keywords
# Remove duplicates
all_faq_keywords = list(set(all_faq_keywords))
unique_all_faq_keywords_string = ', '.join(all_faq_keywords)
### Save to the db the cluster keywords ###
import query
#Get keywords from the Processed_category table from column category_keywords and merge them with the new keywords
category_keywords_records = db.session.query(Processed_category).filter_by(project_id=project_id, category_id=category_id).first()
    if category_keywords_records and category_keywords_records.category_keywords:
category_keywords_list = category_keywords_records.category_keywords.split(', ')
category_keywords_list = [keyword.lower().strip() for keyword in category_keywords_list]
unique_keywords_list = unique_all_faq_keywords_string.split(', ')
unique_keywords_list = [keyword.lower().strip() for keyword in unique_keywords_list]
distinct_keywords_set = set(category_keywords_list + unique_keywords_list)
distinct_keywords_list = list(distinct_keywords_set)
category_keywords = ', '.join(distinct_keywords_list)
query.save_processed_category(db, Processed_category, project_id, category_id, category_keywords=category_keywords)
else:
query.save_processed_category(db, Processed_category, project_id, category_id, category_keywords=unique_all_faq_keywords_string)
##########################
# Combine all FAQs and wrap them with the FAQ div
if category_settings['faq_include_category_name_at_headings']:
prompt = f"Craft FAQ section heading in {app_settings['language']} language for the category: {target_category_name}"
else:
prompt = f"Craft FAQ section heading in {app_settings['language']}"
system_prompt = f"Return just the heading. The language is {app_settings['language']}. *** DO NOT MENTION ANY INSTRUCTIONS FROM YOUR PROMPT! ***"
faq_heading = openai_generic(app_settings, prompt, system_prompt)
heading = f"<h2>{faq_heading}</h2>"
all_faqs.insert(0, heading)
# Adding Wikipedia link at the bottom of the FAQs
translated_category_name = google_translate(f"{target_category_name}", "en")
if category_settings['faq_include_category_name_at_headings'] and category_settings['faq_wiki_link_authority']:
#Config.socketio.emit('log', {'data': f'{formatted_now}: Building wikipedia link for category...'}, room=str(project_id), namespace='/')
#query = f"{target_category_name} wikipedia"
Config.socketio.emit('log', {'data': f'{formatted_now}: Searching Wikipedia for {target_category_name}...'}, room=str(project_id), namespace='/')
#wikilink = google_custom_search(query)
wikilink = get_wikipedia_url(target_category_name, lang='bg')
if wikilink:
Config.socketio.emit('log', {'data': f'{formatted_now}: Adding Wikipedia link: {wikilink}'}, room=str(project_id), namespace='/')
wikipedia_link = f'<a href="{wikilink}" target="_blank">read more at Wikipedia</a>'
else:
Config.socketio.emit('log', {'data': f'{formatted_now}: No results found at WIKIPEDIA BG for {target_category_name}. Searching english version...'}, room=str(project_id), namespace='/')
Config.socketio.emit('log', {'data': f'{formatted_now}: Searching Wikipedia for {translated_category_name}...'}, room=str(project_id), namespace='/')
wikilink = get_wikipedia_url(translated_category_name, lang='en')
if wikilink:
Config.socketio.emit('log', {'data': f'{formatted_now}: Adding Wikipedia link: {wikilink}'}, room=str(project_id), namespace='/')
wikipedia_link = f'<a href="{wikilink}" target="_blank">read more at Wikipedia</a>'
else:
Config.socketio.emit('log', {'data': f'{formatted_now}: No results found for {translated_category_name} at WIKIPEDIA EN. Searching Google...'}, room=str(project_id), namespace='/')
query = f"{translated_category_name} wikipedia"
wikilink = google_custom_search(query)
Config.socketio.emit('log', {'data': f'{formatted_now}: Validating the link...'}, room=str(project_id), namespace='/')
if is_valid_url(wikilink): # Validate the URL
Config.socketio.emit('log', {'data': f'{formatted_now}: Adding Wikipedia link: {wikilink}'}, room=str(project_id), namespace='/')
wikipedia_link = f'<a href="{wikilink}" target="_blank">read more at Wikipedia</a>'
else:
Config.socketio.emit('log', {'data': f'{formatted_now}: The link is invalid: {wikilink}'}, room=str(project_id), namespace='/')
wikipedia_link = target_category_name # Use plain text if the URL is not valid
prompt = f"Add a link to the Wikipedia page: {wikipedia_link} to read more about {target_category_name}\n"
system_prompt = f"The language must be {app_settings['language']}. *** DO NOT MENTION ANY INSTRUCTIONS FROM YOUR PROMPT! ***"
read_more_at_wiki = openai_generic(app_settings, prompt, system_prompt)
read_more = f"<p>{read_more_at_wiki}</p>"
all_faqs.append(read_more)
# Action 1: Make a link to the brand
if len(top_brands) > 0 and category_settings['faq_top_brands_links']:
Config.socketio.emit('log', {'data': f'{formatted_now}: Building brand link for FAQs...'}, room=str(project_id), namespace='/')
brand = random.choice(top_brands)[0]
query_brand = f"{translated_category_name} {brand} official website"
Config.socketio.emit('log', {'data': f'{formatted_now}: Searching Google for trusted website for {brand}'}, room=str(project_id), namespace='/')
brand_link = google_custom_search(query_brand)
Config.socketio.emit('log', {'data': f'{formatted_now}: Validating the link...'}, room=str(project_id), namespace='/')
if is_valid_url(brand_link): # Validate the URL
linked_brand = f'<a href="{brand_link}">{brand}</a>'
else:
linked_brand = brand # Use plain text if the URL is not valid
# Replace brand name in all_faqs
for idx, faq in enumerate(all_faqs):
if brand in faq:
all_faqs[idx] = faq.replace(brand, linked_brand, 1)
if category_settings['faq_use_schema']:
finished_faqs = faq_wrapper_start + ''.join(all_faqs) + faq_wrapper_end
else:
finished_faqs = ''.join(all_faqs)
return finished_faqs
### CRAFT DESCRIPTION FOR THE CATEGORY ###
def generate_category_description(db, Category_Settings, Processed_category, app_settings, category_settings, seo_settings, results, project_id):
openai.api_key = app_settings['openai_key']
now = datetime.now()
formatted_now = now.strftime("%d/%m/%Y %H:%M:%S")
if results is None:
Config.socketio.emit('log', {'data': f'{formatted_now}: No properties found for the category.'},room=str(project_id), namespace='/')
return
###################################
    ### DEFINING IMPORTANT VARIABLES ###
###################################
target_category_info = results['target_category_info']
target_category_name = target_category_info['name']
target_category_url = target_category_info['url_handle']
target_category_properties = target_category_info['properties']
prop_output = ''
if category_settings['include_properties']:
if isinstance(target_category_properties, dict):
for i, (prop, values) in enumerate(target_category_properties.items()):
# If we have already processed the maximum number of properties, break the loop
if i >= category_settings["max_property_values"]:
break
prop_output += f'Property name "{prop}", '
for val in values:
# Check the type of val
if isinstance(val, list):
id_ = val[0]
url_ = val[1] # assuming the second item in the list is always a URL
prop_output += f'Value {id_}, Link: {url_}, '
elif isinstance(val, dict):
id_ = val["id"]
url_ = val["url"]
keywords = val["keywords"]
prop_output += f'Keywords: {", ".join(keywords)}, Links: {url_}, '
elif isinstance(target_category_properties, list):
# handle the list case here, for example:
for prop in target_category_properties:
prop_output += f'Property: {prop}, '
else:
raise ValueError("Unexpected type for target_category_properties")
top_level_category = []
same_level_categories = []
sub_level_categories = []
category_info = results.get('category_info')
if category_info is None:
top_level_category = []
same_level_categories = []
sub_level_categories = []
else:
# Same level categories information
top_level_category_info = category_info.get('root_category')
if top_level_category_info and 'name' in top_level_category_info and 'url' in top_level_category_info:
top_level_category = [(top_level_category_info['name'], top_level_category_info['url'])]
else:
top_level_category = []
same_level_categories_info = category_info.get('same_level_categories', [])
same_level_categories = [(cat['name'], cat['url']) for cat in same_level_categories_info
if cat and 'name' in cat and 'url' in cat]
# Sub level categories information
sub_level_categories_info = category_info.get('sub_level_categories', [])
sub_level_categories = [(cat['name'], cat['url']) for cat in sub_level_categories_info
if cat and 'name' in cat and 'url' in cat]
entry_level_products = []
mid_size_products = []
hi_end_products = []
if category_settings['include_sales_info']:
products_info = results.get('products_by_sales', {})
entry_level_products = []
mid_size_products = []
hi_end_products = []
if 'entry_level_products' in products_info:
entry_level_products = [(prod['name'], prod['product_url'], prod['price'], prod['image_url'])
for prod in products_info['entry_level_products']
if 'name' in prod and 'product_url' in prod and 'price' in prod and 'image_url' in prod]
if 'mid_size_products' in products_info:
mid_size_products = [(prod['name'], prod['product_url'], prod['price'], prod['image_url'])
for prod in products_info['mid_size_products']
if 'name' in prod and 'product_url' in prod and 'price' in prod and 'image_url' in prod]
if 'hi_end_products' in products_info:
hi_end_products = [(prod['name'], prod['product_url'], prod['price'], prod['image_url'])
for prod in products_info['hi_end_products']
if 'name' in prod and 'product_url' in prod and 'price' in prod and 'image_url' in prod]
top_brands = []
if category_settings['add_top_brands']:
top_brands = [(brand['Brand name'], brand['url']) for brand in results.get('best_selling_brands', [])]
### STOP PROCESS IF USER CLICKS STOP ###
if stop_category_process.get(project_id, False):
stop_category(project_id) # Stop process
Config.socketio.emit('log', {'data': f'{formatted_now}: Process stopped by user.'},room=str(project_id), namespace='/')
project = Category_Settings.query.filter_by(project_id=project_id, category_id=category_id).first()
if project:
project.in_progress = False
db.session.commit()
return
# Check if top_brands contains meaningful information
has_top_brands_info = (isinstance(top_brands, list) and len(top_brands) > 0) or \
(isinstance(top_brands, str) and top_brands.strip() != "")
##########################
    #### KEYWORDS RESEARCH ####
##########################
# Check DB Processed categories for the cluster keywords for the target category and if they exist, use them else search for them
#cluster_keywords_dict = query.get_processed_category(db, Processed_category, project_id, target_category_info['id'])
#if cluster_keywords_dict is None:
category_id = target_category_info['id']
unique_keywords_string = ''
if seo_settings['generic_keywords']:
# Check if unique_keywords_string is empty from the DB
category_keywords_records = db.session.query(Processed_category).filter_by(project_id=project_id, category_id=category_id).first()
        if category_keywords_records and category_keywords_records.category_keywords:
unique_keywords_string = category_keywords_records.category_keywords
else:
Config.socketio.emit('log', {'data': f'{formatted_now}: Searching Google for related searches for {target_category_name}'},room=str(project_id), namespace='/')
cluster_keywords_dict = keyword_subsequence(db, Category_Settings, app_settings, main_query=target_category_name, project_id=project_id)
# Process the result to ensure it matches the format of keywords_one_level
for key, value in cluster_keywords_dict.items():
if isinstance(value, dict) and 'error' in value:
cluster_keywords_dict[key] = "error: " + value['error']
elif isinstance(value, list):
cluster_keywords_dict[key] = ', '.join(value)
# Get the keywords from cluster_keywords_dict and convert them into a list
cluster_keywords_list = cluster_keywords_dict.get(target_category_name, '').split(',')
# append category_settings['use_main_keywords'] into the main_query list including the keywords from cluster_keywords_list
if category_settings['use_main_keywords']:
# Convert the string of keywords into a list
main_keywords_list = category_settings['use_main_keywords'].split(",") # assuming keywords are comma-separated
cluster_keywords_list.extend(main_keywords_list)
# Remove empty strings from the list and deduplicate
cluster_keywords_list = list(set([keyword.strip() for keyword in cluster_keywords_list if keyword.strip()]))
# Convert the list back to a comma-separated string and update the dictionary
cluster_keywords_dict[target_category_name] = ','.join(cluster_keywords_list)
cluster_keywords = cluster_keywords_dict
# Creating the second string
key_value_string = ', '.join([f"{key}, {value}" for key, value in cluster_keywords.items()])
keywords_list = key_value_string.split(', ')
unique_keywords_list = list(set(keywords_list))
unique_keywords_string = ', '.join(unique_keywords_list)
### Save to the db the cluster keywords ###
import query
#Get keywords from the Processed_category table from column category_keywords and merge them with the new keywords
category_keywords_records = db.session.query(Processed_category).filter_by(project_id=project_id, category_id=category_id).first()
        if category_keywords_records and category_keywords_records.category_keywords:
category_keywords_list = category_keywords_records.category_keywords.split(', ')
category_keywords_list = [keyword.lower().strip() for keyword in category_keywords_list]
unique_keywords_list = unique_keywords_string.split(', ')
unique_keywords_list = [keyword.lower().strip() for keyword in unique_keywords_list]
distinct_keywords_set = set(category_keywords_list + unique_keywords_list)
distinct_keywords_list = list(distinct_keywords_set)
category_keywords = ', '.join(distinct_keywords_list)
query.save_processed_category(db, Processed_category, project_id, category_id, category_keywords=category_keywords)
else:
query.save_processed_category(db, Processed_category, project_id, category_id, category_keywords=unique_keywords_string)
###############
### PROMPTS ###
###############
class ContentSection:
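        """
        One optional block of the generated category description.

        `condition` controls whether the block is generated, `func(*args)` builds
        its prompt, and `generate_content` fills `self.content` and
        `self.token_count` via the OpenAI chat API (or only prints the prompt when
        `print_prompt` is enabled).
        """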
def __init__(self, condition, func, name, *args):
self.condition = condition
self.func = func
self.name = name
self.args = args
self.content = ""
self.token_count = 0
def generate_content(self, system_prompt):
if self.condition:
# Emit the log for the execution of the section
Config.socketio.emit('log', {'data': f'{formatted_now}: Writing: {self.name}'}, room=str(project_id), namespace='/')
# Generate the prompt using the function associated with the section
prompt = self.func(*self.args)
if app_settings['print_prompt']:
Config.socketio.emit('log', {'data': f'{formatted_now}: System prompt:\n{system_prompt}'},room=str(project_id), namespace='/')
Config.socketio.emit('log', {'data': f'{formatted_now}: Prompt:\n{prompt}'},room=str(project_id), namespace='/')
return
# Construct messages for the section using the system prompt and the generated prompt
messages = construct_messages_for_section(system_prompt, prompt)
# Generate the content for the section
self.content, self.token_count = generate_part(db, Category_Settings, messages, app_settings, category_settings, seo_settings, results, project_id)
# Define sections using the ContentSection class
sections = [
# Section for the intro prompt
ContentSection(
category_settings["include_intro"],
compile_intro_prompt,
"Introduction content",
app_settings, category_settings, seo_settings, target_category_name, target_category_url, unique_keywords_string
),
# Section for properties prompt, runs only if prop_output is present
ContentSection(
bool(prop_output),
compile_properties_prompt,
"Category Properties content",
app_settings, seo_settings, target_category_name, prop_output, unique_keywords_string
),
# Section for product levels prompt, runs only if "include_sales_info" is enabled in category_settings
ContentSection(
category_settings["include_sales_info"] and category_settings["add_best_selling_products"] > 0,
compile_product_levels_intro_prompt,
"Intro for best selling products content",
category_settings, target_category_name, entry_level_products, mid_size_products, hi_end_products, app_settings, seo_settings, unique_keywords_string
),
# Section for product levels prompt, runs only if "include_sales_info" is enabled in category_settings
ContentSection(
category_settings["include_sales_info"] and category_settings["add_best_selling_products"] > 0 and entry_level_products,
compile_product_levels_entry_prompt,
"Entry Levels content",
category_settings, target_category_name, entry_level_products, mid_size_products, hi_end_products, app_settings, seo_settings, unique_keywords_string
),
ContentSection(
category_settings["include_sales_info"] and category_settings["add_best_selling_products"] > 0 and mid_size_products,
compile_product_levels_mid_prompt,
"Mid Levels content",
category_settings, target_category_name, entry_level_products, mid_size_products, hi_end_products, app_settings, seo_settings, unique_keywords_string
),
ContentSection(
category_settings["include_sales_info"] and category_settings["add_best_selling_products"] > 0 and hi_end_products,
compile_product_levels_high_prompt,
"Hi-end Levels content",
category_settings, target_category_name, entry_level_products, mid_size_products, hi_end_products, app_settings, seo_settings, unique_keywords_string
),
# Section for top brands prompt, runs only if both "has_top_brands_info" and "add_top_brands" are true
ContentSection(
has_top_brands_info and category_settings["add_top_brands"] > 0,
compile_top_brands,
"Top Brands content",
app_settings, seo_settings, target_category_name, top_brands, unique_keywords_string
),
# Section for category levels prompt, runs only if "include_category_info" is enabled in category_settings
ContentSection(
category_settings["enable_additional_instructions"],
compile_additional_info_prompt,
"Additional info content",
app_settings, category_settings, seo_settings, unique_keywords_string, target_category_name
),
# Section for category levels prompt, runs only if "include_category_info" is enabled in category_settings
ContentSection(
category_settings["include_category_info"],
compile_category_levels,
"Category Levels content",
app_settings, seo_settings, top_level_category, same_level_categories, sub_level_categories, unique_keywords_string, target_category_name
)
]
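    # Shared system prompt: constrains every section to a valid HTML fragment in
    # the configured language.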
    system_prompt = (
        'The output must be a coherent and detailed description of the category in question in strict and valid HTML format. It should '
        f'be written in {app_settings["language"]}. The output should contain valid HTML code, except tags like H1, newline, body and other main tags. '
        'For the headings use H3 and before each heading add one additional new line for better readability. '
        'For bold use strong tag, for italic use em tag. '
        f'The output must be ONLY the description explicitly and you should keep the {app_settings["language"]} language from the prompt!'
    )
# Generate content for each section
for section in sections:
section.generate_content(system_prompt)
# Calculate the total tokens used across all sections
# Query the Processed_category table for the column token_count for this category
record = db.session.query(Processed_category).filter_by(project_id=project_id, category_id=category_id).first()
current_token_count = record.token_count if record else 0
if current_token_count is None or current_token_count == 0:
total_tokens_used = sum([section.token_count for section in sections])
import query
query.save_processed_category(db, Processed_category, project_id, category_id, token_count=total_tokens_used)
else:
total_tokens_used = sum([section.token_count for section in sections])
updated_token_count = current_token_count + total_tokens_used
import query
query.save_processed_category(db, Processed_category, project_id, category_id, token_count=updated_token_count)
# Combine the content of all sections
final_article = "".join([section.content for section in sections])
return final_article
def generate_part(db, Category_Settings, messages, app_settings, category_settings, seo_settings, results, project_id):
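    """
    Request one content section from the OpenAI chat API.

    Retries up to 15 times on transient API errors and returns a tuple of
    (section content, total tokens used); returns ("", 0) when the process is
    stopped or when no successful response was obtained.
    """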
now = datetime.now()
formatted_now = now.strftime("%d/%m/%Y %H:%M:%S")
    category_id = category_settings['category_id']
    response = None
    for attempt in range(15):
### STOP PROCESS IF USER CLICKS STOP ###
if stop_category_process.get(project_id, False):
stop_category(project_id) # Stop process
Config.socketio.emit('log', {'data': f'{formatted_now}: Process stopped by user.'},room=str(project_id), namespace='/')
project = Category_Settings.query.filter_by(project_id=project_id, category_id=category_id).first()
if project:
project.in_progress = False
db.session.commit()
            return "", 0
try:
response = openai.ChatCompletion.create(
model=app_settings['model'],
messages=messages,
temperature=0,
)
# If the request was successful, break out of the loop
break
except openai.error.AuthenticationError:
# Handle authentication errors (e.g., invalid API key)
Config.socketio.emit('log', {'data': 'Authentication Error. Check your OpenAI API Key!'}, room=str(1), namespace='/')
break
except (openai.error.APIError, openai.error.Timeout, openai.error.ServiceUnavailableError, ) as e:
# Handle APIError, Timeout, and ServiceUnavailableError for retry
wait_time = 2 * (attempt + 1)
Config.socketio.emit('log', {'data': f'{formatted_now}: Encountered an issue: {e.error}. Waiting for {wait_time} seconds before retrying.'},room=str(project_id), namespace='/')
time.sleep(wait_time)
except (openai.error.InvalidRequestError) as e:
Config.socketio.emit('log', {'data': f'{formatted_now}: {e}'},room=str(project_id), namespace='/')
break
except Exception as e:
# Handle all other exceptions without retrying
Config.socketio.emit('log', {'data': f'{formatted_now}: {e}'},room=str(project_id), namespace='/')
break
else:
raise Exception("Maximum number of retries exceeded.")
    # Get the main content (guard against breaks that happened before a successful response)
    if response is None:
        return "", 0
    total_tokens = response['usage']['total_tokens']
main_content = response['choices'][0]['message']['content']
return main_content, total_tokens
def processCategories(db, Processed_category, Category_Settings, project_id, x_cloudcart_apikey, store_url, model):
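    """
    Ensure every store category has a row in the Processed_category table.

    Fetches all categories from the CloudCart API, skips the ones already
    recorded for this project, and inserts the rest with their name and URL.
    """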
# Fetch all processed category IDs for the given project
processed_categories = db.session.query(Processed_category.category_id).filter_by(project_id=project_id).all()
processed_category_ids = {category[0] for category in processed_categories}
# Fetch all categories
app_settings = {
'X-CloudCart-ApiKey': x_cloudcart_apikey,
'url': store_url,
}
categories = getCategoryDetails(app_settings, fields=['id', 'name', 'url_handle'])
# Filter out categories that have already been processed
unprocessed_categories = [category for category in categories if int(category.get('id')) not in processed_category_ids]
# Iterate and save each unprocessed category
for category in unprocessed_categories:
category_id = category.get('id', None)
#if not has_products_in_category(app_settings, category_id):
# print(f"Skipping category ID {category_id} because it has no products.")
# continue # skip to the next category if no products
category_name = category.get('name', None)
category_url_handle = category.get('url_handle', None)
if category_url_handle:
full_category_url = f"{store_url}/category/{category_url_handle}"
else:
full_category_url = None
# Since the description is not fetched in the current API call, it will be None.
# If you want to fetch it, adjust the fields in the getCategoryDetails call above.
description = category.get('description', None)
new_category = Processed_category(
project_id=project_id,
category_id=category_id,
category_name=category_name.lower(),
category_url=full_category_url,
category_description=description
)
db.session.add(new_category)
db.session.commit()
def cat(db, Processed_category, Category_Settings, app_settings, category_settings, seo_settings, project_id):
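    """
    Main worker for category content generation.

    Determines which category IDs to process, collects category/product/brand
    context, optionally generates a description and an FAQ section per category,
    and (outside test mode) pushes the combined HTML back to CloudCart.
    """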
now = datetime.now()
formatted_now = now.strftime("%d/%m/%Y %H:%M:%S")
category_id = category_settings['category_id']
Config.socketio.emit('log', {'data': f'{formatted_now}: Started...'},room=str(project_id), namespace='/')
project = Category_Settings.query.filter_by(project_id=project_id, category_id=category_id).first()
if project:
project.in_progress = True
db.session.commit()
stop_category_process[project_id] = False
# Get category IDs
if not category_settings['category_id']:
Config.socketio.emit('log', {'data': f'{formatted_now}: No specific category ID provided. The script will run for all categories.'}, room=str(project_id), namespace='/')
category_ids = getCategoryIds(app_settings)
# Ensure category_ids is sorted in ascending order
category_ids.sort()
# Get the last processed category ID
last_processed_id = get_last_processed_category(db, Processed_category, project_id)
if last_processed_id:
# Convert the last_processed_id to a string
str_last_processed_id = str(last_processed_id)
# Find the index of the last processed category ID in the category_ids list
try:
index_of_last_processed = category_ids.index(str_last_processed_id)
# Slice the list to start from the next category ID
category_ids = category_ids[index_of_last_processed + 1:]
except ValueError:
# This means the last processed category ID was not found in the category_ids list
# So, continue processing all category IDs
pass
else:
category_ids = [category_settings['category_id']]
# Prepare a list to hold all results
results = []
# Iterate over category IDs
for category_id in category_ids:
Config.socketio.emit('log', {'data': f'{formatted_now}: Processing category ID: {category_id} '},room=str(project_id), namespace='/')
project = Category_Settings.query.filter_by(project_id=project_id, category_id=category_id).first()
if project:
project.in_progress = True
db.session.commit()
### STOP PROCESS IF USER CLICKS STOP ###
if stop_category_process.get(project_id, False):
stop_category(project_id) # Stop process
Config.socketio.emit('log', {'data': f'{formatted_now}: Process stopped by user.'},room=str(project_id), namespace='/')
project = Category_Settings.query.filter_by(project_id=project_id, category_id=category_id).first()
if project:
project.in_progress = False
db.session.commit()
return
# Get category info
target_category_info = getTargetCategoryInfo(db, Processed_category, app_settings, category_id, category_settings, seo_settings, project_id)
### STOP PROCESS IF USER CLICKS STOP ###
if stop_category_process.get(project_id, False):
stop_category(project_id) # Stop process
Config.socketio.emit('log', {'data': f'{formatted_now}: Process stopped by user.'}, room=str(project_id), namespace='/')
project = Category_Settings.query.filter_by(project_id=project_id, category_id=category_id).first()
if project:
project.in_progress = False
db.session.commit()
return
categories_by_id, children_by_parent_id = getCategoryInfo(app_settings)
result = {}
result['target_category_info'] = target_category_info # 'name', 'description', 'url_handle'
if category_settings['include_category_info'] or category_settings['include_category_info_faq']:
category_info = getCategoryLevels(app_settings, category_id, categories_by_id, children_by_parent_id)
result['category_info'] = category_info
if category_settings['include_sales_info'] or category_settings['include_faq_info']:
# Check if both add_best_selling_products and add_top_brands are not 0
if category_settings['add_best_selling_products'] != 0 or category_settings['add_top_brands'] != 0 or category_settings['add_best_selling_products_faq'] or category_settings['add_top_brands_faq']:
products_by_sales, best_selling_brands = getOrderedProductsbySales(category_id, app_settings, category_settings, target_category_info)
result['products_by_sales'] = products_by_sales
result['best_selling_brands'] = best_selling_brands
# Save the processed category
# This is information about all fields from the database
# project_id, category_id, category_structure, category_name, category_prompt, category_description, category_faqs, category_keywords, category_custom_keywords
query.save_processed_category(db, Processed_category, project_id, category_id, category_structure=result)
### STOP PROCESS IF USER CLICKS STOP ###
if stop_category_process.get(project_id, False):
stop_category(project_id) # Stop process
Config.socketio.emit('log', {'data': f'{formatted_now}: Process stopped by user.'},room=str(project_id), namespace='/')
project = Category_Settings.query.filter_by(project_id=project_id, category_id=category_id).first()
if project:
project.in_progress = False
db.session.commit()
return
# Add the result for the current category to the list of results
results.append(result)
description = ''
if app_settings['enable_category_description']:
description = generate_category_description(db, Category_Settings, Processed_category, app_settings, category_settings, seo_settings, result, project_id)
# Save the processed category
# This is information about all fields from the database
# project_id, category_id, category_structure, category_prompt, category_description, category_faqs, category_keywords, category_custom_keywords
if app_settings['test_mode']:
query.save_processed_category(db, Processed_category, project_id, category_id, category_description=description, category_test_mode=True)
else:
query.save_processed_category(db, Processed_category, project_id, category_id, category_description=description, category_test_mode=False)
if category_settings['enable_faq_generation']:
faq = craft_faqs(db, Processed_category, Category_Settings, app_settings, category_settings, seo_settings, result, project_id)
# Save the processed category
# This is information about all fields from the database
# project_id, category_id, category_structure, category_prompt, category_description, category_faqs, category_keywords, category_custom_keywords
if app_settings['test_mode']:
query.save_processed_category(db, Processed_category, project_id, category_id, category_faqs=faq, category_test_mode=True)
else:
query.save_processed_category(db, Processed_category, project_id, category_id, category_faqs=faq, category_test_mode=False)
### Combine description with FAQ into one variable DESCRIPTION ###
description = description or ""
if app_settings['print_prompt'] == False:
if app_settings['enable_category_description'] and category_settings['enable_faq_generation']:
description = description + '\n' + faq
elif category_settings['enable_faq_generation'] and category_settings['append_faq']:
get_category_description = getTargetCategoryInfo(db, Processed_category, app_settings, category_id, category_settings, seo_settings, project_id, include_description=True)
# set the description into a string
description = get_category_description['description'][0] + '\n\n' + faq
elif category_settings['enable_faq_generation']:
description = faq
# Update the category description
if app_settings['test_mode']:
if app_settings['print_prompt'] == False:
Config.socketio.emit('log', {'data': f'{formatted_now}: Result completed: \n{description} '},room=str(project_id), namespace='/')
project = Category_Settings.query.filter_by(project_id=project_id, category_id=category_id).first()
if project:
project.in_progress = False
db.session.commit()
else:
Config.socketio.emit('log', {'data': f'{formatted_now}: Result completed: \n{description} '},room=str(project_id), namespace='/')
project = Category_Settings.query.filter_by(project_id=project_id, category_id=category_id).first()
if project:
project.in_progress = False
db.session.commit()
else:
if app_settings['print_prompt'] == True:
Config.socketio.emit('log', {'data': f'{formatted_now}: Result completed: \n{description} '},room=str(project_id), namespace='/')
project = Category_Settings.query.filter_by(project_id=project_id, category_id=category_id).first()
if project:
project.in_progress = False
db.session.commit()
else:
Config.socketio.emit('log', {'data': f'{formatted_now}: Updating category description for category ID: {category_id} '},room=str(project_id), namespace='/')
updateCategory(category_id, description, app_settings, project_id)
                Config.socketio.emit('log', {'data': f'{formatted_now}: Process completed. Updated category ID: {category_id} '},room=str(project_id), namespace='/')
# Return the list of results after the loop has finished
project = Category_Settings.query.filter_by(project_id=project_id, category_id=category_id).first()
if project:
project.in_progress = False
project.category_ready = False
db.session.commit()
category_updated = Processed_category.query.filter_by(project_id=project_id, category_id=category_id).first()
if category_updated:
category_updated.category_update = True
db.session.commit()
return
def updateCategory(category_id, description, app_settings, project_id):
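    """
    PATCH the generated description onto the CloudCart category, retrying
    transient failures with a linear backoff.
    """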
now = datetime.now()
formatted_now = now.strftime("%d/%m/%Y %H:%M:%S")
headers = {
'X-CloudCart-ApiKey': app_settings['X-CloudCart-ApiKey'],
'Content-Type': 'application/vnd.api+json',
}
url = f"{app_settings['url']}/api/v2/categories/{category_id}"
attributes = {
"description": description
}
body = {
"data": {
"type": "categories",
"id": str(category_id),
"attributes": attributes
}
}
max_retries = 15
    for attempt in range(max_retries):
        try:
            response = requests.patch(url, data=json.dumps(body), headers=headers)
        except requests.RequestException as e:
            # Network-level failure: retry with a linear backoff
            if attempt < max_retries - 1:
                wait_time = 5 * (attempt + 1)
                Config.socketio.emit('log', {'data': f"{formatted_now}: Error occurred at CloudCart ({e}). Waiting for {wait_time} seconds before retrying."},room=str(project_id), namespace='/')
                time.sleep(wait_time)
                continue
            raise
        if response.status_code == 200:
            return response.json()  # Request was successful
        if response.status_code in (429, 500, 502, 503) and attempt < max_retries - 1:
            # Transient status codes: retry with a linear backoff
            wait_time = 5 * (attempt + 1)
            Config.socketio.emit('log', {'data': f"{formatted_now}: Error occurred at CloudCart. Waiting for {wait_time} seconds before retrying."},room=str(project_id), namespace='/')
            time.sleep(wait_time)
            continue
        # Non-retryable status code, or retries exhausted: fail immediately
        raise Exception(f"Request to {url} failed with status code {response.status_code}. The response was: {response.text}")
def stop_category(project_id):
print(f'Stopping the process for project {project_id}...')
stop_category_process[project_id] = True
#main_query = "ะะกะะ ะะกะ ะะะคะะะะจะะะ, ะงะฐัะธ, ะจะธัะตัะฐ"
#project_id = 2
#result = keyword_clustersTEST(db, Category_Settings, app_settings, main_query, project_id)
#result = keyword_clusters(app_settings, main_query)
#print(result)
| [
"The output must be a coherent and detailed description of the category in question in strictly and valid HTML format. It should be written in PLACEHOLDER. The output should contains valid HTML code except tags like H1, newline, body and other main tags.For the headings use H3 and before each heading add one additional new line for better readability.For bold use strong tag, for italic use em tag.The output must be ONLY the description explicitly and you should keep the PLACEHOLDER language from the prompt!",
"7. Highlighting the Top Brands at the FAQs: PLACEHOLDER, mentioning unique selling points of each in few lines. Make a link of each brand.\n",
"Add a link to the Wikipedia page: PLACEHOLDER to read more about PLACEHOLDER\n",
"*** INSTRUCTIONS FOR CRAFTING ANSWERS: ***\n",
"Utilize an informal tone, personal pronouns, active voice, rhetorical questions, and incorporate analogies and metaphors. Keep the text simple, brief and very well formated.\n",
"as an SEO expert with a focus on keyword stemming, you must derive and choose the most appropriate stemmed variations from the keywords provided in this list: 'PLACEHOLDER'. Understand the core meaning and concept of each keyword, and then incorporate these stemmed variations intuitively and naturally across the entire text.\n",
"*** GENERAL FAQ INSTRUCTIONS: ***\n",
"focus on keyword stemming, you must derive and choose the most appropriate stemmed variations from the keywords provided in this list: 'PLACEHOLDER, PLACEHOLDER'. . Understand the core meaning and concept of each keyword, and then incorporate these stemmed variations intuitively and naturally across the entire FAQ.\n",
"Each answer MUST contains minimum PLACEHOLDER words.\n",
"This is question #PLACEHOLDER. Craft a question from this keyword: 'PLACEHOLDER' for the category: 'PLACEHOLDER' with H3 tag.\n",
"Return just the heading. The language is PLACEHOLDER. *** DO NOT MENTION ANY INSTRUCTIONS FROM YOUR PROMPT! ***",
"At your answer add information related with the same level categories: PLACEHOLDER ",
"in combination with this category properties: PLACEHOLDER. It is important to communicate the primary features and functions that these products typically have in combination with the keyphrases provided. \n",
"Craft a question related with the best products or pricing ranges and use the following information for product levels to highlight the best products for each category level. ",
"Craft a 100% unique, SEO-optimized question and answer for the FAQ section in PLACEHOLDER. ",
"Craft FAQ section heading in PLACEHOLDER",
"PLACEHOLDER. *** DO NOT MENTION EXACT PRICES ***\n",
"INSTRUCTIONS FOR CRAFTING QUESTIONS:\n",
"The language must be PLACEHOLDER. *** DO NOT MENTION ANY INSTRUCTIONS FROM YOUR PROMPT! ***",
"Highlighting the Top Brands at the FAQs: PLACEHOLDER, mentioning unique selling points of each in few lines. Do not link each brand. Do not use the URLs.\n\n",
"As a PLACEHOLDER SEO researcher working for PLACEHOLDER category. Craft maximum of PLACEHOLDER SEO-optimized keywords for each property name and its values. Use this PLACEHOLDER category name for a context when crafting the keywords and also \"price\" or \"affordable price\". This is an example of the array: PLACEHOLDER where \"season\" is the name of the property group, \"summer\" is the value of the property and the link is the link to the value. To craft a keyword, use the property name \"season\" as a context and use each values \"summer\" and \"winter\" to generate keywords. For example, use the property name as a context to generate 2 keywords for the value \"summer\" and 2 keywords for the value \"winter\". *** This is the actual information that you need to use to generate keywords: PLACEHOLDER ***",
"*** FINAL INSTRUCTIONS: ***\n",
"Craft questions related with the same level categories: PLACEHOLDER ",
"The output must be strictly valid JSON structure like this example: {\"y\":[{\"id\":\"1\",\"url\":\"https://shop.mdl.bg/category/bodi-women?y=1\",\"keywords\":[\"keyword1\",\"keyword 2\"]},{\"id\":\"2\",\"url\":\"https://shop.mdl.bg/category/bodi-women?y=2\",\"keywords\":[\"keyword1\",\"keyword 2\"]}]}*** The output must be ONLY the JSON structure explicitly and you should keep the PLACEHOLDER language from the prompt!***",
"It is important to make sure you bold added keyphrases for more visibility. That way your FAQs will be SEO optimized.\n",
"Craft FAQ section heading in PLACEHOLDER language for the category: PLACEHOLDER",
"and sub-categories with their links to their category page: PLACEHOLDER ",
"Each Question and Answer should be written in your own words, without copying from other sources and must use provided keyphrases in PLACEHOLDER\n",
"As an SEO expert it is important and mandatory to focus on keyword stemming, you must derive and choose the most appropriate stemmed variations from the keywords provided in this list: 'PLACEHOLDER'. Understand the core meaning and concept of each keyword, and then incorporate these stemmed variations intuitively and naturally across the entire text.",
"I want you to act as a proficient SEO content writer for FAQs in PLACEHOLDER",
"Craft an FAQs of the category in this strictly and valid format: PLACEHOLDER. The answer for each FAQ must be put at ***ANSWER-PLACEHOLDER***! Additionally: The text at the ***ANSWER-PLACEHOLDER*** must be with appropriate HTML tags to improve its structure. For new lines use \"br\", for bold: \"strong\". When you have listings use \"ul\" and \"li\" or \"ol\" for numbers, while preserving the existing tags. Emphasize headings, subheadings, and key points by new lines, and ensure the content flows coherently. DO NOT MENTION ANYTHING FROM THE PROMPT IN ANY CASE!",
"Format the list of the products in visible and readable way. Make sure to link the products.\n",
"This keywords are related to the main keyword PLACEHOLDER that the question is crafted from. You must include it as well!\n",
"DO NOT ADD OR REFFER TO ANY WORDS OF THE THIS PROMPT! ALL OF THE INFORMATION IS FOR BUILDING THE PERFECT FAQ section! DO NOT MENTION ANYTHING FROM THE PROMPT IN ANY CASE!\n",
"When the question is related with sales, pricing, how cheap the good is or anything related. The key is to craft a pre sales or post sales question that will help customers to make an informed decision based on their specific needs and preferences. ",
"Craft an FAQs of the category. It have to be written in PLACEHOLDER. For headings use H3. Make the text readable and for each text block you must add <p> tag. For new lines use \"br\", for bold: \"strong\". When you have listings use \"ul\" and \"li\" or \"ol\" for numbers, while preserving the existing tags. Emphasize headings, subheadings, and key points by new lines, and ensure the content flows coherently. DO NOT MENTION ANYTHING FROM THE PROMPT IN ANY CASE!",
"and sub-categories: PLACEHOLDER by adding links to their category page"
] |
2024-01-10 | smallv0221/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
from __future__ import absolute_import, division, print_function
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished = {\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
""" Yields examples. """
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
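# Example usage (a sketch, not part of the original script; assumes the `datasets`
# library is installed and the multi-gigabyte archive can be downloaded and extracted):
#
#   from datasets import load_dataset
#   openwebtext = load_dataset("openwebtext", split="train")
#   print(openwebtext[0]["text"][:200])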
| [] |
2024-01-10 | sbyebss/monge_map_solver | src~models~img2text_model.py | import torch
from dalle2_pytorch import OpenAIClipAdapter
from dalle2_pytorch.dalle2_pytorch import l2norm
from dalle2_pytorch.optimizer import get_optimizer
from torchmetrics.classification.accuracy import Accuracy
import wandb
from src.models.base_model import BaseModule
from src.viz.points import plot_histogram
train_acc = Accuracy()
# pylint: disable=abstract-method,too-many-ancestors,arguments-renamed,line-too-long,arguments-differ,unused-argument
class Img2TextModule(BaseModule):
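    """
    Maps CLIP image embeddings to CLIP class-text embeddings with the learned map `map_t`.

    Source samples are CLIP embeddings of the datamodule's images; target samples
    are CLIP text embeddings of the class names. Validation reports the
    classification accuracy of the mapped embeddings ("post-hoc") against CLIP's
    zero-shot baseline.
    """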
def __init__(self, cfg) -> None:
super().__init__(cfg)
self.clip = OpenAIClipAdapter(cfg.clip_model)
self.image_embed_scale = cfg.image_embed_dim**0.5
self.meaningful_class = None
self.class_emb = None
def on_train_start(self):
self.meaningful_class = self.trainer.datamodule.meaningful_class
meaningful_token = self.trainer.datamodule.meaningful_token
with torch.no_grad():
self.class_emb, _ = self.clip.embed_text(meaningful_token.to(self.device))
self.class_emb = l2norm(self.class_emb)
def get_real_data(self, src_data):
# src_data: (img_tensor, label)
src_img = src_data[0]
uniform_idx = torch.randint(self.class_emb.shape[0], (src_img.shape[0],))
trg_txt_emb = self.class_emb[uniform_idx]
with torch.no_grad():
src_img_emb, _ = self.clip.embed_image(src_img)
src_img_emb *= self.image_embed_scale
trg_txt_emb *= self.image_embed_scale
return src_img_emb, trg_txt_emb
# src_data, trg_data = batch
# # src_data: (img_tensor, label)
# # trg_data: (img_tensor, label)
# src_img, trg_label = src_data[0], trg_data[1]
# with torch.no_grad():
# src_img_emb, _ = self.clip.embed_image(src_img)
# trg_class = self.meaningful_class[trg_label.detach().cpu()]
# if self.global_step==1:
# print(trg_class)
# trg_token = tokenize(trg_class)
# trg_txt_emb, _ = self.clip.embed_text(trg_token.to(self.device))
# src_img_emb *= self.image_embed_scale
# trg_txt_emb *= self.image_embed_scale
# return src_img_emb, trg_txt_emb
def validation_step(self, batch, batch_idx):
data, target = batch
with torch.no_grad():
emb_image, _ = self.clip.embed_image(data)
adusted_emb = self.map_t(emb_image)
similarity = (100.0 * l2norm(adusted_emb) @ self.class_emb.T).softmax(dim=-1)
_, pred = similarity.topk(1, dim=-1)
pred = pred.squeeze(1).detach().cpu()
post_hoc_acc = train_acc(pred, target.cpu())
train_acc.reset()
similarity = (100.0 * l2norm(emb_image) @ self.class_emb.T).softmax(dim=-1)
_, zero_shot_pred = similarity.topk(1, dim=-1)
zero_shot_pred = zero_shot_pred.squeeze(1).detach().cpu()
zero_shot_acc = train_acc(zero_shot_pred, target.cpu())
train_acc.reset()
return pred, zero_shot_pred, post_hoc_acc, zero_shot_acc, data.shape[0]
def validation_epoch_end(self, outputs):
pred_list = []
zero_shot_pred_list = []
correct_test_count = 0
correct_zero_shot_count = 0
for pred, zero_shot_pred, post_hoc_acc, zero_shot_acc, batch_size in outputs:
pred_list.extend(list(pred.numpy()))
zero_shot_pred_list.extend(list(zero_shot_pred.numpy()))
correct_test_count += batch_size * post_hoc_acc
correct_zero_shot_count += batch_size * zero_shot_acc
accuracy = correct_test_count / len(pred_list)
zero_shot_accuracy = correct_zero_shot_count / len(zero_shot_pred_list)
self.log_dict(
{
"test_accuracy/post-hoc": accuracy,
"test_accuracy/zero-shot": zero_shot_accuracy,
}
)
torch.save(
{
"pred": pred_list,
"clip_pred": zero_shot_pred_list,
"acc": accuracy,
"clip_zero_shot": zero_shot_accuracy,
},
f"pred_acc_{self.current_epoch+1}.pt",
)
hist_path = f"hist_{self.current_epoch+1}.png"
plot_histogram(pred_list, num_class=self.class_emb.shape[0], path=hist_path)
wandb.log({"histogram": wandb.Image(hist_path, caption="pred")})
def configure_optimizers(self):
# These parameters are from LAION pretrained prior.
optim_map_kwargs = dict(
lr=self.cfg.lr_T, wd=self.cfg.wd, eps=1e-6, group_wd_params=True
)
optimizer_map = get_optimizer(self.map_t.parameters(), **optim_map_kwargs)
optim_f_kwargs = dict(
lr=self.cfg.lr_f, wd=self.cfg.wd, eps=1e-6, group_wd_params=True
)
optimizer_f = get_optimizer(self.f_net.parameters(), **optim_f_kwargs)
return optimizer_map, optimizer_f
# def pretrain_identity(self, data, map_opt):
# loss = F.mse_loss(self.map_t(data), data)
# map_opt.zero_grad()
# loss.backward()
# map_opt.step()
# self.log_dict({"pretrain_loss/id_loss": loss})
| [] |
2024-01-10 | sbyebss/monge_map_solver | src~models~text2img_model.py | import os
import cv2
import torch
from dalle2_pytorch import OpenAIClipAdapter
from dalle2_pytorch.dalle2_pytorch import l2norm
from dalle2_pytorch.optimizer import get_optimizer
from PIL import Image, ImageDraw, ImageFont
from torchvision.utils import make_grid, save_image
from src.callbacks.txt2img_callbacks import generate_grid_samples
from src.datamodules.utils import split_test_full_data
from src.logger.jam_wandb import prefix_metrics_keys
from src.models.base_model import BaseModule
from src.models.loss_zoo import gradientOptimality
from src.viz.points import compare_highd_kde_scatter
# pylint: disable=abstract-method,too-many-ancestors,arguments-renamed,line-too-long,arguments-differ,unused-argument,too-many-locals
sampling_labels = [
"Text emb.",
"Our emb.",
"laion emb.",
"Real emb.",
"Real Images",
]
# Function to add sampling_labels to an image
def add_labels_to_image(image_path):
# Load the image
with Image.open(image_path) as image:
draw = ImageDraw.Draw(image)
# Choose a font size
font_size = 30
# Load a font
font_path = os.path.join(cv2.__path__[0], "qt", "fonts", "DejaVuSans.ttf")
font = ImageFont.truetype(font_path, size=font_size)
# Get image dimensions
img_width, img_height = image.size
# Calculate the height required for the text
text_height = max(
[draw.textsize(label, font=font)[1] for label in sampling_labels]
)
# Create a new image with extra space for the text labels
new_img = Image.new(
"RGB", (img_width, img_height + text_height + 20), (255, 255, 255)
) # White background for the new space
# Paste the original image onto the new image
new_img.paste(image, (0, text_height + 20))
# Initialize ImageDraw to draw on the new image
draw = ImageDraw.Draw(new_img)
# Define the starting Y position for the text
text_y = 10 # Small padding from the top of the new image
# Calculate the width of a single column assuming sampling_labels are evenly spaced
column_width = img_width / len(sampling_labels)
# Iterate over the sampling_labels and their respective column positions
for idx, label in enumerate(sampling_labels):
# Calculate the position for each label (centered above each column)
text_width, text_height = draw.textsize(label, font=font)
text_x = idx * column_width + (column_width - text_width) / 2
# Draw the text on the new image
draw.text((text_x, text_y), label, font=font, fill=(0, 0, 0)) # Black text
# Save the new image
new_img.save(image_path)
class Text2ImgModule(BaseModule):
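    """
    Learns a map `map_t` from CLIP text embeddings to CLIP image embeddings,
    serving as a stand-in for the diffusion prior when sampling images with a
    pretrained DALL-E 2 style decoder.
    """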
def __init__(self, cfg) -> None:
super().__init__(cfg)
self.clip = OpenAIClipAdapter(cfg.clip_model)
self.image_embed_scale = cfg.image_embed_dim**0.5
def get_real_data(self, batch):
src_data, trg_data = batch
# src_data: (image_embedding, tokenized_caption)
# trg_data: (image_embedding, tokenized_caption)
src_tokens, trg_img_emb = src_data[1], trg_data[0]
text_embed, text_encodings = self.clip.embed_text(src_tokens)
unnorm_text_embed = self.clip.clip.encode_text(src_tokens)
src_text_cond = {
"text_embed": text_embed,
"text_encodings": text_encodings,
"unnorm_text_embed": unnorm_text_embed,
}
trg_img_emb *= self.image_embed_scale
return src_text_cond, trg_img_emb
def loss_f(self, src_text_cond, trg_img_emb, mask=None):
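        """
        Update objective for the dual potential `f_net`: minimizes
        f(T(x)) - f(y) plus an optional gradient-optimality penalty, i.e.
        maximizes the gap between real and mapped image embeddings under f.
        """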
with torch.no_grad():
tx_tensor = self.map_t(**src_text_cond)
# assert torch.isclose(tx_tensor.norm(dim=-1).mean(), trg_img_emb.norm(dim=-1).mean(),rtol=1e-2)
f_tx, f_y = self.f_net(tx_tensor).mean(), self.f_net(trg_img_emb).mean()
if self.cfg.optimal_penalty:
gradient_penalty = gradientOptimality(
self.f_net, tx_tensor, src_text_cond["text_embed"], self.cfg.coeff_go
)
else:
gradient_penalty = 0.0
f_loss = f_tx - f_y + gradient_penalty
log_info = prefix_metrics_keys(
{
"f_tx": f_tx,
"f_y": f_y,
"gradient_penalty": gradient_penalty,
"f_tx-f_y": f_tx - f_y,
},
"f_loss",
)
return f_loss, log_info
def loss_map(self, src_text_cond, mask=None):
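        """
        Update objective for the map `map_t`: transport cost between the
        unnormalized text embedding and T(x), minus the potential term f(T(x))
        (the inner step of the max-min OT formulation).
        """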
# src_text_cond = {"text_embed": text_embed, "text_encodings": text_encodings, "unnorm_text_embed": unnorm_text_embed}
tx_tensor = self.map_t(**src_text_cond)
cost_loss = self.cost_func(
src_text_cond["unnorm_text_embed"],
l2norm(tx_tensor),
self.cfg.coeff_mse,
self.cfg.exponent,
)
f_tx = self.f_net(tx_tensor).mean()
map_loss = cost_loss - f_tx
log_info = prefix_metrics_keys(
{"cost_loss": cost_loss, "f_tx": f_tx}, "map_loss"
)
return map_loss, log_info
def validation_step(self, batch, batch_idx):
# evaluate cosine similarity
trg_img_emb, src_tokens = batch
text_embed, text_encodings = self.clip.embed_text(src_tokens)
src_text_cond = {"text_embed": text_embed, "text_encodings": text_encodings}
self.cos_similarity(src_text_cond, trg_img_emb)
def cos_similarity(self, src_text_cond, trg_img_emb):
        if self.cfg.ema:
            with self.ema_map.average_parameters():
                tx_tensor = l2norm(self.map_t(**src_text_cond))
        else:
            tx_tensor = l2norm(self.map_t(**src_text_cond))
src_txt_emb = src_text_cond["text_embed"]
txt_trg_sim = -self.cost_func(src_txt_emb, trg_img_emb)
txt_pf_sim = -self.cost_func(src_txt_emb, tx_tensor)
pf_trg_sim = -self.cost_func(tx_tensor, trg_img_emb)
rdm_idx = torch.randperm(trg_img_emb.shape[0])
unrelated_sim = -self.cost_func(tx_tensor, src_txt_emb[rdm_idx])
log_info = prefix_metrics_keys(
{
"baseline similarity": txt_trg_sim,
"similarity with text": txt_pf_sim,
"difference from baseline similarity": abs(txt_trg_sim - txt_pf_sim),
"similarity with original image": pf_trg_sim,
"similarity with unrelated caption": unrelated_sim,
},
"validation_cos_sim",
)
self.log_dict(log_info)
def test_step(self, batch, batch_idx, dataloader_idx=0):
if batch_idx <= 100:
# assert 1 == 0, "Too many test samples, terminate earlier."
if dataloader_idx == 0:
# visualize the embedding
trg_img_emb, src_tokens = batch
text_embed, text_encodings = self.clip.embed_text(src_tokens)
src_text_cond = {
"text_embed": text_embed,
"text_encodings": text_encodings,
}
pf_img_emb = l2norm(self.map_t(**src_text_cond))
                # Placeholder for a discrete OT baseline, stored as numpy so test_epoch_end can call torch.from_numpy on it
                discrete_ot_map_img_emb = (trg_img_emb.detach() * 0).cpu().numpy()
return pf_img_emb, trg_img_emb, discrete_ot_map_img_emb
# sampling images
test_example_data = split_test_full_data(batch, self.device)
            # TODO: this callback lookup is fragile; we hard-code it by index.
sampling_callback = self.trainer.callbacks[3]
test_images, test_captions = generate_grid_samples(
self,
sampling_callback.decoder,
sampling_callback.prior,
test_example_data,
device=self.device,
skip_ema=True,
)
cherry_pick_img_grid = make_grid(
test_images, nrow=1, padding=2, pad_value=0
)
img_path = f"img_{batch_idx}.png"
save_image(cherry_pick_img_grid, img_path)
torch.save(
{"images": test_images, "captions": test_captions},
f"raw_data_{batch_idx}.pt",
)
# After generating the grid image and saving it
add_labels_to_image(img_path)
return None
return None
def test_epoch_end(self, outputs):
for idx, out in enumerate(outputs):
pf_img_emb, trg_img_emb, discrete_ot_map_img_emb = out
if idx == 0:
stacked_pf_feat = pf_img_emb
stacked_trg_feat = trg_img_emb
stacked_discrete_ot_feat = torch.from_numpy(discrete_ot_map_img_emb)
else:
stacked_pf_feat = torch.cat([stacked_pf_feat, pf_img_emb], dim=0)
stacked_trg_feat = torch.cat([stacked_trg_feat, trg_img_emb], dim=0)
stacked_discrete_ot_feat = torch.cat(
[
stacked_discrete_ot_feat,
torch.from_numpy(discrete_ot_map_img_emb),
],
dim=0,
)
compare_highd_kde_scatter(
[stacked_pf_feat, stacked_trg_feat, stacked_discrete_ot_feat], "pca.jpg"
)
def configure_optimizers(self):
# These parameters are from LAION pretrained prior.
optim_map_kwargs = dict(
lr=self.cfg.lr_T, wd=self.cfg.wd, eps=1e-6, group_wd_params=True
)
optimizer_map = get_optimizer(self.map_t.parameters(), **optim_map_kwargs)
optim_f_kwargs = dict(
lr=self.cfg.lr_f, wd=self.cfg.wd, eps=1e-6, group_wd_params=True
)
optimizer_f = get_optimizer(self.f_net.parameters(), **optim_f_kwargs)
return optimizer_map, optimizer_f
| [] |
2024-01-10 | gucky92/Auto-GPT | tests~integration~conftest.py | import os
import openai.api_requestor
import pytest
from pytest_mock import MockerFixture
from tests.conftest import PROXY
from tests.vcr.vcr_filter import before_record_request, before_record_response
BASE_VCR_CONFIG = {
"record_mode": "new_episodes",
"before_record_request": before_record_request,
"before_record_response": before_record_response,
"filter_headers": [
"Authorization",
"X-OpenAI-Client-User-Agent",
"User-Agent",
],
"match_on": ["method", "body"],
}
@pytest.fixture(scope="session")
def vcr_config():
# this fixture is called by the pytest-recording vcr decorator.
return BASE_VCR_CONFIG
def patch_api_base(requestor):
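    """Point the OpenAI API requestor at the test proxy's /v1 endpoint."""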
new_api_base = f"{PROXY}/v1"
requestor.api_base = new_api_base
return requestor
@pytest.fixture
def patched_api_requestor(mocker: MockerFixture):
original_init = openai.api_requestor.APIRequestor.__init__
original_validate_headers = openai.api_requestor.APIRequestor._validate_headers
def patched_init(requestor, *args, **kwargs):
original_init(requestor, *args, **kwargs)
patch_api_base(requestor)
def patched_validate_headers(self, supplied_headers):
headers = original_validate_headers(self, supplied_headers)
headers["AGENT-MODE"] = os.environ.get("AGENT_MODE")
headers["AGENT-TYPE"] = os.environ.get("AGENT_TYPE")
return headers
if PROXY:
mocker.patch("openai.api_requestor.APIRequestor.__init__", new=patched_init)
mocker.patch.object(
openai.api_requestor.APIRequestor,
"_validate_headers",
new=patched_validate_headers,
)
| [] |
2024-01-10 | norrishuang/private-llm-qa-bot | doc_preprocess~QA_auto_generator.py | import os
import re
import argparse
import openai
import json
from tqdm import tqdm
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import MarkdownTextSplitter
from langchain.text_splitter import RecursiveCharacterTextSplitter
# you need to install these packages: pypdf, tqdm, openai, langchain
# Please excute => export OPENAI_API_KEY={key}
openai.api_key = os.getenv("OPENAI_API_KEY")
en_prompt_template = """
Here is one page of {product}'s manual document
```
{page}
```
Please automatically generate as many questions as possible based on this manual document, and follow these rules:
1. "{product}"" should be contained in every question
2. questions start with "Question:"
3. answers begin with "Answer:"
"""
# zh_prompt_template = """
# Here is one page of {product}'s manual document
# ```
# {page}
# ```
# Please automatically generate as many questions as possible based on this manual document, and follow these rules:
# 1. "{product}"" should be contained in every question
# 2. questions start with "Question:"
# 3. answers begin with "Answer:"
# 4. Answer in Chinese
# """
zh_prompt_template = """
如下三个反括号中是{product}的产品文档片段
```
{page}
```
请基于这些文档片段自动生成尽可能多的问题以及对应答案, 尽可能详细全面, 并且遵循如下规则:
1. "{product}"需要一直被包含在Question中
2. 问题部分需要以"Question:"开始
3. 答案部分需要以"Answer:"开始
"""
def Generate_QA(prompt):
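    """Ask gpt-3.5-turbo to generate Q&A pairs for the given prompt and parse the reply into (question, answer) tuples."""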
messages = [{"role": "user", "content": f"{prompt}"}]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0,
max_tokens=2048
)
content = response.choices[0]["message"]["content"]
arr = content.split('Question:')[1:]
qa_pair = [ p.split('Answer:') for p in arr ]
return qa_pair
def Generate_QA_From_Docs(pages, prompt_template, product_name="Midea Dishwasher", out_format="json"):
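    """Generate Q&A records for a slice of the document chunks, yielding either JSON-ready dicts or formatted Q&A text."""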
for page in tqdm(pages[13:23]):
# print(page)
# yield { "doc" : page.page_content }
prompt = prompt_template.format(product=product_name, page=page.page_content)
qa_list = Generate_QA(prompt)
for q_c,a_c in qa_list:
if out_format == "json":
ret = page.metadata
ret["Q"] = q_c.strip()
ret["A"] = a_c.strip()
yield ret
elif out_format == "QA":
yield "Question: " + q_c.strip() + "\nAnswer: " + a_c.strip() + "\n\n"
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input_file', type=str, default='./1-Manual.pdf', help='input file')
parser.add_argument('--output_file', type=str, default='./FAQ.txt', help='output file')
parser.add_argument('--product', type=str, default="Midea Dishwasher", help='specify the product name of doc')
parser.add_argument('--input_format', type=str, default="pdf", help='specify the format')
parser.add_argument('--lang', type=str, default="en", help='specify the language')
parser.add_argument('--output_format', type=str, default="json", help='specify the language')
args = parser.parse_args()
doc_path = args.input_file
product_name = args.product
qa_path = args.output_file
in_format = args.input_format
lang = args.lang
out_format= args.output_format
prompt_template = zh_prompt_template if lang == "zh" else en_prompt_template
docs = None
if in_format == "pdf":
loader = PyPDFLoader(doc_path)
docs = loader.load_and_split()
elif in_format == "md":
in_file = open(doc_path, 'r')
markdown_text = in_file.read()
# markdown_splitter = MarkdownTextSplitter(chunk_size=500, chunk_overlap=0)
text_splitter = RecursiveCharacterTextSplitter(
separators=["#","\n\n", "\n"],
chunk_size = 1000,
chunk_overlap = 0
)
docs = text_splitter.create_documents([markdown_text])
else:
raise RuntimeError
out_f = open(qa_path, 'w')
with open(qa_path, 'w') as out_f:
for result in Generate_QA_From_Docs(docs, prompt_template, product_name, out_format):
if out_format == "json":
out_f.write(json.dumps(result, ensure_ascii=False))
out_f.write("\n")
elif out_format == "QA":
out_f.write(result)
| [
"\nHere is one page of {product}'s manual document\n```\n{page}\n```\nPlease automatically generate as many questions as possible based on this manual document, and follow these rules:\n1. \"{product}\"\" should be contained in every question\n2. questions start with \"Question:\"\n3. answers begin with \"Answer:\"\n",
"\nๅฆไธไธไธชๅๆฌๅทไธญๆฏ{product}็ไบงๅๆๆกฃ็ๆฎต\n```\n{page}\n```\n่ฏทๅบไบ่ฟไบๆๆกฃ็ๆฎต่ชๅจ็ๆๅฐฝๅฏ่ฝๅค็้ฎ้ขไปฅๅๅฏนๅบ็ญๆก, ๅฐฝๅฏ่ฝ่ฏฆ็ปๅ
จ้ข, ๅนถไธ้ตๅพชๅฆไธ่งๅ:\n1. \"{product}\"้่ฆไธ็ด่ขซๅ
ๅซๅจQuestionไธญ\n2. ้ฎ้ข้จๅ้่ฆไปฅ\"Question:\"ๅผๅง\n3. ็ญๆก้จๅ้่ฆไปฅ\"Answer:\"ๅผๅง\n"
] |
2024-01-10 | norrishuang/private-llm-qa-bot | deploy~lambda~intention~intention.py | import json
import os
import logging
from langchain.embeddings import SagemakerEndpointEmbeddings
from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler
from langchain.vectorstores import OpenSearchVectorSearch
from langchain.llms.sagemaker_endpoint import LLMContentHandler
from langchain import PromptTemplate, SagemakerEndpoint
from typing import Any, Dict, List, Union,Mapping, Optional, TypeVar, Union
from langchain.chains import LLMChain
from langchain.llms.bedrock import Bedrock
from botocore.exceptions import ClientError
from opensearchpy import OpenSearch, RequestsHttpConnection, AWSV4SignerAuth, helpers
import boto3
logger = logging.getLogger()
logger.setLevel(logging.INFO)
credentials = boto3.Session().get_credentials()
class APIException(Exception):
def __init__(self, message, code: str = None):
if code:
super().__init__("[{}] {}".format(code, message))
else:
super().__init__(message)
def handle_error(func):
"""Decorator for exception handling"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except APIException as e:
logger.exception(e)
raise e
except Exception as e:
logger.exception(e)
raise RuntimeError(
"Unknown exception, please check Lambda log for more details"
)
return wrapper
class ContentHandler(EmbeddingsContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, inputs: List[str], model_kwargs: Dict) -> bytes:
        instruction_zh = "为这个句子生成表示以用于检索相关文章:"
instruction_en = "Represent this sentence for searching relevant passages:"
input_str = json.dumps({"inputs": inputs, "parameters":{}, "is_query":False, "instruction":instruction_en})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> List[List[float]]:
response_json = json.loads(output.read().decode("utf-8"))
return response_json["sentence_embeddings"]
class llmContentHandler(LLMContentHandler):
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
input_str = json.dumps({'inputs': prompt,'history':[],**model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json["outputs"]
def create_intention_prompt_templete():
    prompt_template = """{instruction}\n\n{fewshot}\n\n"Q: \"{query}\",这个问题的提问意图是啥?可选项[{options}]\nA: """
PROMPT = PromptTemplate(
template=prompt_template,
input_variables=['fewshot','query', 'instruction', 'options']
)
return PROMPT
def get_bedrock_aksk(secret_name='chatbot_bedrock', region_name = "us-west-2"):
# Create a Secrets Manager client
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager',
region_name=region_name
)
try:
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
except ClientError as e:
# For a list of exceptions thrown, see
# https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html
raise e
# Decrypts secret using the associated KMS key.
secret = json.loads(get_secret_value_response['SecretString'])
return secret['BEDROCK_ACCESS_KEY'],secret['BEDROCK_SECRET_KEY']
@handle_error
def lambda_handler(event, context):
embedding_endpoint = os.environ.get('embedding_endpoint')
region = os.environ.get('region')
aos_endpoint = os.environ.get('aos_endpoint')
index_name = os.environ.get('index_name')
query = event.get('query')
fewshot_cnt = event.get('fewshot_cnt')
use_bedrock = event.get('use_bedrock')
llm_model_endpoint = os.environ.get('llm_model_endpoint')
logger.info("embedding_endpoint: {}".format(embedding_endpoint))
logger.info("region:{}".format(region))
logger.info("aos_endpoint:{}".format(aos_endpoint))
logger.info("index_name:{}".format(index_name))
logger.info("fewshot_cnt:{}".format(fewshot_cnt))
logger.info("llm_model_endpoint:{}".format(llm_model_endpoint))
content_handler = ContentHandler()
embeddings = SagemakerEndpointEmbeddings(
endpoint_name=embedding_endpoint,
region_name=region,
content_handler=content_handler
)
auth = AWSV4SignerAuth(credentials, region)
docsearch = OpenSearchVectorSearch(
index_name=index_name,
embedding_function=embeddings,
opensearch_url="https://{}".format(aos_endpoint),
http_auth = auth,
use_ssl = True,
verify_certs = True,
connection_class = RequestsHttpConnection
)
docs = docsearch.similarity_search_with_score(
query=query,
k = fewshot_cnt,
space_type="cosinesimil",
vector_field="embedding",
text_field="query",
metadata_field='*'
)
docs_simple = [ {"query" : doc[0].page_content, "intention" : doc[0].metadata['intention'], "score":doc[1]} for doc in docs]
options = set([doc['intention'] for doc in docs_simple ])
options_str = ", ".join(options)
    instruction = "回答下列选择题:"
    examples = [ "Q: \"{}\",这个问题的提问意图是啥?可选项[{}]\nA: {}".format(doc['query'], options_str, doc['intention']) for doc in docs_simple ]
fewshot_str = "\n\n".join(examples)
parameters = {
"temperature": 0.01,
}
llm = None
if not use_bedrock:
llmcontent_handler = llmContentHandler()
llm=SagemakerEndpoint(
endpoint_name=llm_model_endpoint,
region_name=region,
model_kwargs={'parameters':parameters},
content_handler=llmcontent_handler
)
else:
ACCESS_KEY, SECRET_KEY=get_bedrock_aksk()
boto3_bedrock = boto3.client(
service_name="bedrock",
region_name="us-east-1",
endpoint_url="https://bedrock.us-east-1.amazonaws.com",
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY
)
parameters = {
"max_tokens_to_sample": 20,
"stop_sequences": ["\n\n"],
"temperature":0.01,
"top_p":1
}
llm = Bedrock(model_id="anthropic.claude-v1", client=boto3_bedrock, model_kwargs=parameters)
prompt_template = create_intention_prompt_templete()
prompt = prompt_template.format(fewshot=fewshot_str, instruction=instruction, query=query, options=options_str)
if len(options) == 1:
logger.info("Notice: Only Single latent Intention detected.")
answer = options.pop()
log_dict = { "prompt" : prompt, "answer" : answer, "examples": docs_simple }
log_dict_str = json.dumps(log_dict, ensure_ascii=False)
logger.info(log_dict_str)
return answer
llmchain = LLMChain(llm=llm, verbose=False, prompt=prompt_template)
answer = llmchain.run({'fewshot':fewshot_str, "instruction":instruction, "query":query, "options": options_str})
answer = answer.strip()
log_dict = { "prompt" : prompt, "answer" : answer , "examples": docs_simple }
log_dict_str = json.dumps(log_dict, ensure_ascii=False)
logger.info(log_dict_str)
if answer not in options:
answer = 'unknown'
return answer
| [
"Q: ",
"instruction",
"ๅ็ญไธๅ้ๆฉ้ข๏ผ\n\nPLACEHOLDER\n\n\"Q: \"PLACEHOLDER\"๏ผ่ฟไธช้ฎ้ข็ๆ้ฎๆๅพๆฏๅฅ๏ผๅฏ้้กน[PLACEHOLDER]\nA: ",
"options",
"{instruction}\n\n{fewshot}\n\n\"Q: \"{query}\"๏ผ่ฟไธช้ฎ้ข็ๆ้ฎๆๅพๆฏๅฅ๏ผๅฏ้้กน[{options}]\nA: "
] |
2024-01-10 | norrishuang/private-llm-qa-bot | code~intention_detect~intention.py | import json
import os
import logging
from langchain.embeddings import SagemakerEndpointEmbeddings
from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler
from langchain.vectorstores import OpenSearchVectorSearch
from langchain.llms.sagemaker_endpoint import LLMContentHandler
from langchain import PromptTemplate, SagemakerEndpoint
from typing import Any, Dict, List, Union,Mapping, Optional, TypeVar, Union
from langchain.chains import LLMChain
from opensearchpy import OpenSearch, RequestsHttpConnection, AWSV4SignerAuth, helpers
import boto3
logger = logging.getLogger()
logger.setLevel(logging.INFO)
credentials = boto3.Session().get_credentials()
class APIException(Exception):
def __init__(self, message, code: str = None):
if code:
super().__init__("[{}] {}".format(code, message))
else:
super().__init__(message)
def handle_error(func):
"""Decorator for exception handling"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except APIException as e:
logger.exception(e)
raise e
except Exception as e:
logger.exception(e)
raise RuntimeError(
"Unknown exception, please check Lambda log for more details"
)
return wrapper
class ContentHandler(EmbeddingsContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, inputs: List[str], model_kwargs: Dict) -> bytes:
        instruction_zh = "为这个句子生成表示以用于检索相关文章:"
instruction_en = "Represent this sentence for searching relevant passages:"
input_str = json.dumps({"inputs": inputs, "parameters":{}, "is_query":False, "instruction":instruction_en})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> List[List[float]]:
response_json = json.loads(output.read().decode("utf-8"))
return response_json["sentence_embeddings"]
class llmContentHandler(LLMContentHandler):
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
input_str = json.dumps({'inputs': prompt,'history':[],**model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json["outputs"]
def create_intention_prompt_templete():
    prompt_template = """{instruction}\n\n{fewshot}\n\n"Q: \"{query}\",这个问题的提问意图是啥?可选项[{options}]\nA: """
PROMPT = PromptTemplate(
template=prompt_template,
input_variables=['fewshot','query', 'instruction', 'options']
)
return PROMPT
@handle_error
def lambda_handler(event, context):
embedding_endpoint = os.environ.get('embedding_endpoint')
region = os.environ.get('region')
aos_endpoint = os.environ.get('aos_endpoint')
index_name = os.environ.get('index_name')
query = event.get('query')
fewshot_cnt = event.get('fewshot_cnt')
llm_model_endpoint = os.environ.get('llm_model_endpoint')
logger.info("embedding_endpoint: {}".format(embedding_endpoint))
logger.info("region:{}".format(region))
logger.info("aos_endpoint:{}".format(aos_endpoint))
logger.info("index_name:{}".format(index_name))
logger.info("fewshot_cnt:{}".format(fewshot_cnt))
logger.info("llm_model_endpoint:{}".format(llm_model_endpoint))
content_handler = ContentHandler()
embeddings = SagemakerEndpointEmbeddings(
endpoint_name=embedding_endpoint,
region_name=region,
content_handler=content_handler
)
auth = AWSV4SignerAuth(credentials, region)
docsearch = OpenSearchVectorSearch(
index_name=index_name,
embedding_function=embeddings,
opensearch_url="https://{}".format(aos_endpoint),
http_auth = auth,
use_ssl = True,
verify_certs = True,
connection_class = RequestsHttpConnection
)
docs = docsearch.similarity_search_with_score(
query=query,
k = fewshot_cnt,
space_type="cosinesimil",
vector_field="embedding",
text_field="query",
metadata_field='*'
)
docs_simple = [ {"query" : doc[0].page_content, "intention" : doc[0].metadata['intention'], "score":doc[1]} for doc in docs]
options = set([doc['intention'] for doc in docs_simple ])
options_str = ", ".join(options)
    instruction = "回答下列选择题:"
    examples = [ "Q: \"{}\",这个问题的提问意图是啥?可选项[{}]\nA: {}".format(doc['query'], options_str, doc['intention']) for doc in docs_simple ]
fewshot_str = "\n\n".join(examples)
parameters = {
"temperature": 0.01,
}
llmcontent_handler = llmContentHandler()
llm=SagemakerEndpoint(
endpoint_name=llm_model_endpoint,
region_name=region,
model_kwargs={'parameters':parameters},
content_handler=llmcontent_handler
)
prompt_template = create_intention_prompt_templete()
prompt = prompt_template.format(fewshot=fewshot_str, instruction=instruction, query=query, options=options_str)
if len(options) == 1:
logger.info("Notice: Only Single latent Intention detected.")
answer = options.pop()
log_dict = { "prompt" : prompt, "answer" : answer, "examples": docs_simple }
log_dict_str = json.dumps(log_dict, ensure_ascii=False)
logger.info(log_dict_str)
return answer
llmchain = LLMChain(llm=llm, verbose=False, prompt=prompt_template)
answer = llmchain.run({'fewshot':fewshot_str, "instruction":instruction, "query":query, "options": options_str})
log_dict = { "prompt" : prompt, "answer" : answer , "examples": docs_simple }
log_dict_str = json.dumps(log_dict, ensure_ascii=False)
logger.info(log_dict_str)
if answer not in options:
answer = 'unknown'
return answer
| [
"Q: ",
"instruction",
"ๅ็ญไธๅ้ๆฉ้ข๏ผ\n\nPLACEHOLDER\n\n\"Q: \"PLACEHOLDER\"๏ผ่ฟไธช้ฎ้ข็ๆ้ฎๆๅพๆฏๅฅ๏ผๅฏ้้กน[PLACEHOLDER]\nA: ",
"options",
"{instruction}\n\n{fewshot}\n\n\"Q: \"{query}\"๏ผ่ฟไธช้ฎ้ข็ๆ้ฎๆๅพๆฏๅฅ๏ผๅฏ้้กน[{options}]\nA: "
] |
2024-01-10 | norrishuang/private-llm-qa-bot | code~aos_write_job.py | #!/usr/bin/env python
# coding: utf-8
from opensearchpy import OpenSearch, RequestsHttpConnection, AWSV4SignerAuth, helpers
import boto3
import random
import json
from awsglue.utils import getResolvedOptions
import sys
import hashlib
import datetime
import re
import os
import itertools
from bs4 import BeautifulSoup
from langchain.document_loaders import PDFMinerPDFasHTMLLoader
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter,CharacterTextSplitter
import logging
args = getResolvedOptions(sys.argv, ['bucket', 'object_key','AOS_ENDPOINT','REGION','EMB_MODEL_ENDPOINT','PUBLISH_DATE'])
s3 = boto3.resource('s3')
bucket = args['bucket']
object_key = args['object_key']
QA_SEP = '=====' # args['qa_sep'] #
EXAMPLE_SEP = '\n\n'
arg_chunk_size = 384
# EMB_MODEL_ENDPOINT = "st-paraphrase-mpnet-base-v2-2023-04-19-04-14-31-658-endpoint"
EMB_MODEL_ENDPOINT=args['EMB_MODEL_ENDPOINT']
smr_client = boto3.client("sagemaker-runtime")
# AOS_ENDPOINT = 'vpc-chatbot-knn-3qe6mdpowjf3cklpj5c4q2blou.us-east-1.es.amazonaws.com'
AOS_ENDPOINT = args['AOS_ENDPOINT']
REGION = args['REGION']
publish_date = args['PUBLISH_DATE'] if 'PUBLISH_DATE' in args.keys() else datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
INDEX_NAME = 'chatbot-index'
EXAMPLE_INDEX_NAME = 'chatbot-example-index'
EMB_BATCH_SIZE=20
Sentence_Len_Threshold=10
Paragraph_Len_Threshold=20
DOC_INDEX_TABLE= 'chatbot_doc_index'
dynamodb = boto3.client('dynamodb')
AOS_BENCHMARK_ENABLED=False
import numpy as np
def get_embedding(smr_client, text_arrs, endpoint_name=EMB_MODEL_ENDPOINT):
if AOS_BENCHMARK_ENABLED:
text_len = len(text_arrs)
return [ np.random.rand(768).tolist() for i in range(text_len) ]
parameters = {
}
response_model = smr_client.invoke_endpoint(
EndpointName=endpoint_name,
Body=json.dumps(
{
"inputs": text_arrs,
"parameters": parameters,
"is_query" : False,
"instruction" : None
}
),
ContentType="application/json",
)
json_str = response_model['Body'].read().decode('utf8')
json_obj = json.loads(json_str)
embeddings = json_obj["sentence_embeddings"]
return embeddings
def batch_generator(generator, batch_size):
while True:
batch = list(itertools.islice(generator, batch_size))
if not batch:
break
yield batch
def iterate_paragraph(file_content, object_key, smr_client, index_name, endpoint_name):
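    """Yield OpenSearch bulk-index actions: paragraphs and their sentences are embedded in batches and wrapped as document records."""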
json_arr = json.loads(file_content)
doc_title = object_key
def chunk_generator(json_arr):
for idx, json_item in enumerate(json_arr):
header = ""
if len(json_item['heading']) > 0:
header = json_item['heading'][0]['heading']
paragraph_content = json_item['content']
if len(paragraph_content) > 1024 or len(paragraph_content) < Sentence_Len_Threshold:
continue
yield (idx, paragraph_content, 'Paragraph', paragraph_content)
            sentences = re.split('[。??.!!]', paragraph_content)
for sent in (sent for sent in sentences if len(sent) > Sentence_Len_Threshold):
yield (idx, sent, 'Sentence', paragraph_content)
generator = chunk_generator(json_arr)
batches = batch_generator(generator, batch_size=EMB_BATCH_SIZE)
for batch in batches:
if batch is not None:
emb_src_texts = [item[1] for item in batch]
print("len of emb_src_texts :{}".format(len(emb_src_texts)))
embeddings = get_embedding(smr_client, emb_src_texts, endpoint_name)
for i, emb in enumerate(embeddings):
document = { "publish_date": publish_date, "idx": batch[i][0], "doc" : batch[i][1], "doc_type" : batch[i][2], "content" : batch[i][3], "doc_title": doc_title, "doc_category": "", "embedding" : emb}
yield {"_index": index_name, "_source": document, "_id": hashlib.md5(str(document['doc']).encode('utf-8')).hexdigest()}
def iterate_pdf_json(file_content, object_key, smr_client, index_name, endpoint_name):
json_arr = json.loads(file_content)
doc_title = json_arr[0]['doc_title']
def chunk_generator(json_arr):
for idx, json_item in enumerate(json_arr):
paragraph_content = None
content = json_item['content']
# print("----{}----".format(idx))
# print(content)
is_table = not isinstance(content, str)
doc_category = 'table' if is_table else 'paragraph'
if is_table:
paragraph_content = "Table - {}\n{}\n\n{}".format(content['table'], json.dumps(content['data']), content['footer'])
else:
paragraph_content = "#{}\n{}".format(doc_title, content)
if len(paragraph_content) > 1024 or len(paragraph_content) < Paragraph_Len_Threshold:
continue
yield (idx, paragraph_content, 'Paragraph', paragraph_content, doc_category)
if is_table:
yield (idx, content['footer'], 'Sentence', content['footer'], doc_category)
else:
                sentences = re.split('[。??.!!]', paragraph_content)
for sent in (sent for sent in sentences if len(sent) > Sentence_Len_Threshold):
yield (idx, sent, 'Sentence', paragraph_content, doc_category)
generator = chunk_generator(json_arr)
batches = batch_generator(generator, batch_size=EMB_BATCH_SIZE)
try:
for batch in batches:
if batch is not None:
emb_src_texts = [item[1] for item in batch]
print("len of emb_src_texts :{}".format(len(emb_src_texts)))
embeddings = get_embedding(smr_client, emb_src_texts, endpoint_name)
for i, emb in enumerate(embeddings):
document = { "publish_date": publish_date, "idx": batch[i][0], "doc" : batch[i][1], "doc_type" : batch[i][2], "content" : batch[i][3], "doc_title": doc_title, "doc_category": batch[i][4], "embedding" : emb}
yield {"_index": index_name, "_source": document, "_id": hashlib.md5(str(document['doc']).encode('utf-8')).hexdigest()}
except Exception as e:
logging.exception(e)
def iterate_QA(file_content, object_key,smr_client, index_name, endpoint_name):
json_content = json.loads(file_content)
json_arr = json_content["qa_list"]
doc_title = object_key
doc_category = json_content["doc_category"]
it = iter(json_arr)
qa_batches = batch_generator(it, batch_size=EMB_BATCH_SIZE)
for idx, batch in enumerate(qa_batches):
questions = [ item['Question'] for item in batch ]
answers = [ item['Answer'] for item in batch ]
embeddings = get_embedding(smr_client, questions, endpoint_name)
for i in range(len(embeddings)):
document = { "publish_date": publish_date, "doc" : questions[i], "idx": idx,"doc_type" : "Question", "content" : answers[i], "doc_title": doc_title, "doc_category": doc_category, "embedding" : embeddings[i]}
yield {"_index": index_name, "_source": document, "_id": hashlib.md5(str(document).encode('utf-8')).hexdigest()}
embeddings_answer = get_embedding(smr_client, answers, endpoint_name)
for i in range(len(embeddings_answer)):
document = { "publish_date": publish_date, "doc" : questions[i], "idx": idx,"doc_type" : "Question", "content" : answers[i], "doc_title": doc_title, "doc_category": doc_category, "embedding" : embeddings_answer[i]}
yield {"_index": index_name, "_source": document, "_id": hashlib.md5(str(document).encode('utf-8')).hexdigest()}
def iterate_examples(file_content, object_key, smr_client, index_name, endpoint_name):
json_arr = json.loads(file_content)
it = iter(json_arr)
example_batches = batch_generator(it, batch_size=EMB_BATCH_SIZE)
for idx, batch in enumerate(example_batches):
queries = [ item['query'] for item in batch ]
intentions = [ item['intention'] for item in batch ]
replies = [ item['reply'] for item in batch ]
embeddings = get_embedding(smr_client, queries, endpoint_name)
for i, query in enumerate(queries):
print("query:")
print(query)
document = { "publish_date": publish_date, "intention" : intentions[i], "query" : queries[i], "reply" : replies[i], "embedding" : embeddings[i]}
yield {"_index": index_name, "_source": document, "_id": hashlib.md5(str(document).encode('utf-8')).hexdigest()}
def link_header(semantic_snippets):
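    """Attach a heading hierarchy to each snippet by ranking heading font sizes, then serialize all snippets to JSON."""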
heading_fonts_arr = [ item.metadata['heading_font'] for item in semantic_snippets ]
heading_arr = [ item.metadata['heading'] for item in semantic_snippets ]
def fontsize_mapping(heading_fonts_arr):
heading_fonts_set = list(set(heading_fonts_arr))
heading_fonts_set.sort(reverse=True)
idxs = range(len(heading_fonts_set))
font_idx_mapping = dict(zip(heading_fonts_set,idxs))
return font_idx_mapping
fontsize_dict = fontsize_mapping(heading_fonts_arr)
snippet_arr = []
for idx, snippet in enumerate(semantic_snippets):
font_size = heading_fonts_arr[idx]
heading_stack = []
heading_info = {"font_size":heading_fonts_arr[idx], "heading":heading_arr[idx], "fontsize_idx" : fontsize_dict[font_size]}
heading_stack.append(heading_info)
for id in range(0,idx)[::-1]:
if font_size < heading_fonts_arr[id]:
font_size = heading_fonts_arr[id]
heading_info = {"font_size":font_size, "heading":heading_arr[id], "fontsize_idx" : fontsize_dict[font_size]}
heading_stack.append(heading_info)
snippet_info = {
"heading" : heading_stack,
"content" : snippet.page_content
}
snippet_arr.append(snippet_info)
json_arr = json.dumps(snippet_arr, ensure_ascii=False)
return json_arr
def parse_pdf_to_json(file_content):
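    """Group PDFMiner HTML spans into heading/content snippets based on font size and return them as JSON."""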
soup = BeautifulSoup(file_content,'html.parser')
content = soup.find_all('div')
cur_fs = None
cur_text = ''
snippets = [] # first collect all snippets that have the same font size
for c in content:
sp = c.find('span')
if not sp:
continue
st = sp.get('style')
if not st:
continue
fs = re.findall('font-size:(\d+)px',st)
if not fs:
continue
fs = int(fs[0])
if not cur_fs:
cur_fs = fs
if fs == cur_fs:
cur_text += c.text
else:
snippets.append((cur_text,cur_fs))
cur_fs = fs
cur_text = c.text
snippets.append((cur_text,cur_fs))
cur_idx = -1
semantic_snippets = []
# Assumption: headings have higher font size than their respective content
for s in snippets:
# if current snippet's font size > previous section's heading => it is a new heading
if not semantic_snippets or s[1] > semantic_snippets[cur_idx].metadata['heading_font']:
metadata={'heading':s[0], 'content_font': 0, 'heading_font': s[1]}
#metadata.update(data.metadata)
semantic_snippets.append(Document(page_content='',metadata=metadata))
cur_idx += 1
continue
# if current snippet's font size <= previous section's content => content belongs to the same section (one can also create
# a tree like structure for sub sections if needed but that may require some more thinking and may be data specific)
if not semantic_snippets[cur_idx].metadata['content_font'] or s[1] <= semantic_snippets[cur_idx].metadata['content_font']:
semantic_snippets[cur_idx].page_content += s[0]
semantic_snippets[cur_idx].metadata['content_font'] = max(s[1], semantic_snippets[cur_idx].metadata['content_font'])
continue
        # if current snippet's font size > previous section's content but less than the previous section's heading, then also make a new
# section (e.g. title of a pdf will have the highest font size but we don't want it to subsume all sections)
metadata={'heading':s[0], 'content_font': 0, 'heading_font': s[1]}
#metadata.update(data.metadata)
semantic_snippets.append(Document(page_content='',metadata=metadata))
cur_idx += 1
json_content = link_header(semantic_snippets)
return json_content
def parse_faq_to_json(file_content):
arr = file_content.split(QA_SEP)
json_arr = []
for item in arr:
print(item)
question, answer = item.strip().split("\n", 1)
question = question.replace("Question: ", "")
answer = answer.replace("Answer: ", "")
obj = {
"Question":question, "Answer":answer
}
json_arr.append(obj)
qa_content = {
"doc_title" : "",
"doc_category" : "",
"qa_list" : json_arr
}
json_content = json.dumps(qa_content, ensure_ascii=False)
return json_content
def parse_txt_to_json(file_content):
text_splitter = RecursiveCharacterTextSplitter(
chunk_size = arg_chunk_size,
chunk_overlap = 0,
)
results = []
chunks = text_splitter.create_documents([ file_content ] )
for chunk in chunks:
snippet_info = {
"heading" : [],
"content" : chunk.page_content
}
results.append(snippet_info)
json_content = json.dumps(results, ensure_ascii=False)
return json_content
def parse_example_to_json(file_content):
arr = file_content.split(EXAMPLE_SEP)
json_arr = []
for item in arr:
elements = item.strip().split("\n")
print("elements:")
print(elements)
obj = { element.split(":")[0] : element.split(":")[1] for element in elements }
json_arr.append(obj)
qa_content = {
"example_list" : json_arr
}
json_content = json.dumps(qa_content, ensure_ascii=False)
return json_content
def parse_html_to_json(html_docs):
text_splitter = RecursiveCharacterTextSplitter(
chunk_size = arg_chunk_size,
chunk_overlap = 0,
)
results = []
    chunks = text_splitter.create_documents([ doc.page_content for doc in html_docs ] )
for chunk in chunks:
snippet_info = {
"heading" : [],
"content" : chunk.page_content
}
results.append(snippet_info)
json_content = json.dumps(results, ensure_ascii=False)
return json_content
def load_content_json_from_s3(bucket, object_key, content_type, credentials):
if content_type == 'pdf':
pdf_path=os.path.basename(object_key)
s3_client=boto3.client('s3', region_name=REGION)
s3_client.download_file(Bucket=bucket, Key=object_key, Filename=pdf_path)
loader = PDFMinerPDFasHTMLLoader(pdf_path)
file_content = loader.load()[0].page_content
json_content = parse_pdf_to_json(file_content)
return json_content
else:
obj = s3.Object(bucket,object_key)
file_content = obj.get()['Body'].read().decode('utf-8', errors='ignore').strip()
if content_type == 'faq':
json_content = parse_faq_to_json(file_content)
elif content_type =='txt':
json_content = parse_txt_to_json(file_content)
elif content_type =='json':
json_content = file_content
elif content_type == 'pdf.json':
json_content = file_content
elif content_type == 'example':
json_content = file_content
else:
raise RuntimeError("unsupport content type...(pdf, faq, txt, pdf.json are supported.)")
return json_content
def put_idx_to_ddb(filename,username,index_name,embedding_model):
try:
dynamodb.put_item(
Item={
'filename':{
'S':filename,
},
'username':{
'S':username,
},
'index_name':{
'S':index_name,
},
'embedding_model':{
'S':embedding_model,
}
},
TableName = DOC_INDEX_TABLE,
)
print(f"Put filename:{filename} with embedding:{embedding_model} index_name:{index_name} by user:{username} to ddb success")
return True
except Exception as e:
print(f"There was an error put filename:{filename} with embedding:{embedding_model} index_name:{index_name} to ddb: {str(e)}")
return False
def query_idx_from_ddb(filename,username,embedding_model):
try:
response = dynamodb.query(
TableName=DOC_INDEX_TABLE,
ExpressionAttributeValues={
':v1': {
'S': filename,
},
':v2': {
'S': username,
},
':v3': {
'S': embedding_model,
},
},
KeyConditionExpression='filename = :v1 and username = :v2',
ExpressionAttributeNames={"#e":"embedding_model"},
FilterExpression='#e = :v3',
ProjectionExpression='index_name'
)
if len(response['Items']):
index_name = response['Items'][0]['index_name']['S']
else:
index_name = ''
print (f"query filename:{filename} with embedding:{embedding_model} index_name:{index_name} from ddb")
return index_name
except Exception as e:
print(f"There was an error an error query filename:{filename} index from ddb: {str(e)}")
return ''
def get_idx_from_ddb(filename,embedding_model):
try:
response = dynamodb.get_item(
Key={
'filename':{
'S':filename,
},
'embedding_model':{
'S':embedding_model,
},
},
TableName = DOC_INDEX_TABLE,
)
index_name = ''
if response.get('Item'):
index_name = response['Item']['index_name']['S']
print (f"Get filename:{filename} with index_name:{index_name} from ddb")
return index_name
except Exception as e:
print(f"There was an error get filename:{filename} with embedding:{embedding_model} index from ddb: {str(e)}")
return ''
def WriteVecIndexToAOS(bucket, object_key, content_type, smr_client, aos_endpoint=AOS_ENDPOINT, region=REGION, index_name=INDEX_NAME):
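    """Load the document from S3, embed its chunks, and bulk-write them into the OpenSearch (AOS) vector index."""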
credentials = boto3.Session().get_credentials()
auth = AWSV4SignerAuth(credentials, region)
# auth = ('xxxx', 'yyyy') master user/pwd
# auth = (aos_master, aos_pwd)
try:
file_content = load_content_json_from_s3(bucket, object_key, content_type, credentials)
# print("file_content:")
# print(file_content)
client = OpenSearch(
hosts = [{'host': aos_endpoint, 'port': 443}],
http_auth = auth,
use_ssl = True,
verify_certs = True,
connection_class = RequestsHttpConnection,
            timeout = 60, # 默认超时时间是10 秒,
            max_retries=5, # 重试次数
retry_on_timeout=True
)
print("---------flag------")
gen_aos_record_func = None
if content_type == "faq":
gen_aos_record_func = iterate_QA(file_content, object_key,smr_client, index_name, EMB_MODEL_ENDPOINT)
elif content_type in ['txt', 'pdf', 'json']:
gen_aos_record_func = iterate_paragraph(file_content,object_key, smr_client, index_name, EMB_MODEL_ENDPOINT)
elif content_type in [ 'pdf.json' ]:
gen_aos_record_func = iterate_pdf_json(file_content,object_key, smr_client, index_name, EMB_MODEL_ENDPOINT)
elif content_type in ['example']:
gen_aos_record_func = iterate_examples(file_content,object_key, smr_client, index_name, EMB_MODEL_ENDPOINT)
else:
raise RuntimeError('No Such Content type supported')
        # chunk_size 为文档数 默认值为500
        # max_chunk_bytes 为写入的最大字节数,默认100M过大,可以改成10-15M
        # max_retries 重试次数
        # initial_backoff 为第一次重试时sleep的秒数,再次重试会翻倍
response = helpers.bulk(client, gen_aos_record_func, max_retries=3, initial_backoff=200, max_backoff=801, max_chunk_bytes=10 * 1024 * 1024)#, chunk_size=10000, request_timeout=60000)
return response
except Exception as e:
print(f"There was an error when ingest:{object_key} to aos cluster, Exception: {str(e)}")
return ''
def process_s3_uploaded_file(bucket, object_key):
print("********** object_key : " + object_key)
content_type = None
index_name = INDEX_NAME
if object_key.endswith(".faq"):
print("********** pre-processing faq file")
content_type = 'faq'
elif object_key.endswith(".txt"):
print("********** pre-processing text file")
content_type = 'txt'
elif object_key.endswith(".pdf.json"):
print("********** pre-processing pdf.json file")
content_type = 'pdf.json'
elif object_key.endswith(".pdf"):
print("********** pre-processing pdf file")
content_type = 'pdf'
elif object_key.endswith(".json"):
print("********** pre-processing json file")
content_type = 'json'
elif object_key.endswith(".example"):
print("********** pre-processing example file")
content_type = 'example'
index_name = EXAMPLE_INDEX_NAME
else:
raise RuntimeError("unsupport content type...(pdf, faq, txt, pdf.json are supported.)")
#check if it is already built
idx_name = get_idx_from_ddb(object_key,EMB_MODEL_ENDPOINT)
if len(idx_name) > 0:
print("doc file already exists")
return
response = WriteVecIndexToAOS(bucket, object_key, content_type, smr_client, index_name=index_name)
print("response:")
print(response)
print("ingest {} chunk to AOS".format(response[0]))
put_idx_to_ddb(filename=object_key,username='s3event',
index_name=index_name,
embedding_model=EMB_MODEL_ENDPOINT)
for s3_key in object_key.split(','):
print("processing {}".format(s3_key))
process_s3_uploaded_file(bucket, s3_key) | [
"0"
] |
2024-01-10 | Bhardwaj-python/J.A.R.V.I.S. | J.A.R.V.I.S~Brain~AIBrain.py | import openai
fileopen = open("D:\\Bhardwaj\\J.A.R.V.I.S\\Data\\Api.txt")
API = fileopen.read()
fileopen.close()
def ReplyBrain(question, chat_log=None):
file_path = "D:\\Bhardwaj\\J.A.R.V.I.S\\Database\\chat_log.txt"
with open(file_path, "r") as file:
chat_log_template = file.read()
if chat_log is None:
chat_log = chat_log_template
openai.api_key = API
prompt = f'{chat_log} You : {question}\nJ.A.R.V.I.S. : '
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0.5,
max_tokens = 4008,
top_p=0.3,
frequency_penalty=0.0,
presence_penalty=0.0
)
answer = response.choices[0].text.strip()
return answer
| [
"PLACEHOLDER You : PLACEHOLDER\nJ.A.R.V.I.S. : "
] |
2024-01-10 | Bhardwaj-python/J.A.R.V.I.S. | J.A.R.V.I.S~Brain~Qna.py | #Api Key
fileopen = open("D:\\Bhardwaj\\J.A.R.V.I.S\\Data\\Api.txt")
API = fileopen.read()
fileopen.close()
#Modules
import openai
#Coding
openai.api_key = API
completion = openai.Completion()
def QuestionAnswer(question, chat_log=None):
file_path = "D:\\Bhardwaj\\J.A.R.V.I.S\\Database\\chat_log.txt"
with open(file_path, "r") as file:
chat_log_template = file.read()
if chat_log is None:
chat_log = chat_log_template
prompt = f'{chat_log} You : {question}\nJ.A.R.V.I.S. : '
response = completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0.0,
max_tokens = 100,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
answer = response.choices[0].text.strip()
return answer
| [
"PLACEHOLDER You : PLACEHOLDER\nJ.A.R.V.I.S. : "
] |
2024-01-10 | emrgnt-cmplxty/quantgpt | quantgpt~core~data~cache.py | import logging
import os
import pickle
import time
from enum import Enum
from typing import Any
import openai
from quantgpt.financial_tools.utils import home_path
logger = logging.getLogger(__name__)
class DataCache:
def __init__(
self,
cache_file=None,
initial_prompt_file=None,
final_prompt_file=None,
overwrite_cache=False,
):
self.cache = (
self.load_object("cache", cache_file) if cache_file else {}
)
self.initial_prompt = (
self.load_object("prompts", initial_prompt_file)
if initial_prompt_file
else ""
)
self.final_prompt = (
self.load_object("prompts", final_prompt_file)
if final_prompt_file
else ""
)
self.overwrite_cache = overwrite_cache
def fetch_summary(self, prompt, max_retries=3):
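        """Call the chat model with a simple retry loop and return [prompt, raw response]."""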
retries = 0
response_summary = None
success = False
while retries < max_retries and not success:
try:
response_summary = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "system",
"content": "You are Bloomberg GPT, a Large Language Model which specializes in understanding financial data.",
},
{"role": "user", "content": prompt},
],
)
success = True
except Exception as e:
logger.error(f"Error while fetching summary: {e}")
retries += 1
time.sleep(5) # Wait for 5 seconds before retrying
if not success:
raise Exception("Failed to fetch summary after multiple attempts")
return [prompt, response_summary]
def load_prompt(self, file_path):
with open(file_path, "r") as file:
return file.read().strip()
def build_prompt(self, title, body):
return f"{self.initial_prompt} Title:\n{title}\nBody:\n{body}\n {self.final_prompt}"
def get_result(self, title, body):
prompt = self.build_prompt(title, body[0:3_000])
if title in self.cache and not self.overwrite_cache:
return self.cache[title]
else:
openai.api_key = (
"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"
)
logger.info("Fetching sentiment for title: " + title + "...")
result = self.fetch_summary(prompt)
self.cache[title] = result[1]
self.save_object(
"cache", "cache.pkl", self.cache
) # Save the cache out.
return result[1]
def categorize_result(self, result):
category = (
result["choices"][0]["message"]["content"]
.split("\n")[-1]
.strip()
.replace(".", "")
.replace(",", "")
)
if ":" in category:
category = category.split(":")[-1]
category = category.strip().replace(".", "").replace(",", "")
if "extremely positive".upper() in category.upper():
category = self.Category("EXTREMELY_POSITIVE")
elif "very positive".upper() in category.upper():
category = self.Category("VERY_POSITIVE")
elif "positive".upper() in category.upper():
category = self.Category("POSITIVE")
elif "neutral".upper() in category.upper():
category = self.Category("NEUTRAL")
elif "negative".upper() in category.upper():
category = self.Category("NEGATIVE")
else:
category = self.Category("N/A")
return category
def save_object(self, obj_dir: str, file_name: str, obj: Any):
file_path = os.path.join(
home_path(),
"data",
"openai",
obj_dir,
file_name,
)
with open(file_path, "wb") as file:
pickle.dump(obj, file)
def load_object(self, obj_dir: str, file_name: str):
file_path = os.path.join(
home_path(),
"data",
"openai",
obj_dir,
file_name,
)
if ".pkl" in file_name:
# Saving the data object to a file
with open(file_path, "rb") as file:
obj = pickle.load(file)
return obj
elif ".txt" in file_name:
with open(file_path, "r") as file:
return file.read().strip()
else:
raise ValueError("Filetype not supported.")
class Category(Enum):
EXTREMELY_POSITIVE = "EXTREMELY_POSITIVE"
VERY_POSITIVE = "VERY_POSITIVE"
POSITIVE = "POSITIVE"
NEUTRAL = "NEUTRAL"
NEGATIVE = "NEGATIVE"
N_A = "N/A"
def __str__(self):
return self.value
| [
"You are Bloomberg GPT, a Large Language Model which specializes in understanding financial data."
] |
2024-01-10 | shruti222patel/repo-gpt | src~repo_gpt~agents~central_intelligence.py | import logging
from repo_gpt.agents.base_agent import BaseAgent
from repo_gpt.agents.code_writer import CodeWritingAgent
from repo_gpt.agents.repo_comprehender import RepoUnderstandingAgent
from repo_gpt.file_handler.generic_code_file_handler import PythonFileHandler
from repo_gpt.openai_service import OpenAIService
from repo_gpt.search_service import SearchService
logger = logging.getLogger(__name__)
class CentralIntelligenceAgent(BaseAgent):
system_prompt = """You are an expert software engineer. You have a few helper agents that help you understand and write good software. You can call these agents by using the following functions:
- understand_the_codebase_and_formulate_plan(query): Use this function to call an LLM agent to understand the codebase and formulate a plan of what files need to be updated and how they need to be updated. Also use this function to answer general questions about the codebase. The input should be a query about the codebase.
- update_code(plan): Use this function to call an LLM agent to update the code in the repository. The input should be a plan of what files need to be updated and how they need to be updated.
Use the two llm agents to complete the user task. Always understand the codebase first and follow the existing coding practices
**DO NOT** respond to the user directly. Use the functions instead.
"""
def __init__(
self,
user_task,
root_path,
embedding_file_path,
threshold=10 * 2,
debug=False,
openai_key=None,
):
system_prompt = "You are an expert software engineer writing code in a repository. The user gives you a plan detailing how the code needs to be updated. You implement the code changes using functions. Ask clarifying questions."
super().__init__(
user_task, "completed_all_code_updates", system_prompt, threshold, debug
) # Call ParentAgent constructor
self.root_path = root_path
self.embedding_path = embedding_file_path
self.openai_key = openai_key
self.openai_service = (
OpenAIService() if not openai_key else OpenAIService(openai_key)
)
self.functions = self._initialize_functions()
def _initialize_functions(self):
return [
{
"name": "understand_the_codebase_and_formulate_plan",
"description": "Use this function to call an LLM agent to understand the codebase and formulate a plan of what files need to be updated and how they need to be updated. Also use this function to answer general questions about the codebase. The input should be a query about the codebase.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The task that needs to be accomplished or a general repository question that must be answered.",
},
},
"required": ["query"],
},
},
{
"name": "update_code",
"description": "Use this function to call an LLM agent to update the code in the repository. The input should be a plan of what files need to be updated and how they need to be updated.",
"parameters": {
"type": "object",
"properties": {
"plan": {
"type": "string",
"description": "A detailed plan of what files need to be updated and how they need to be updated.",
}
},
"required": ["plan"],
},
},
{
"name": "users_task_is_completed",
"description": "Call this function when the user's task is completed. ",
"parameters": {
"type": "object",
"properties": {
"summary_of_actions_taken": {
"type": "string",
"description": "Enumeration of all the changes that were made to the code.",
}
},
"required": ["summary_of_actions_taken"],
},
},
]
def understand_the_codebase_and_formulate_plan(self, query):
repo_agent = RepoUnderstandingAgent(
query,
self.root_path,
self.embedding_path,
openai_key=self.openai_key,
debug=True,
)
return repo_agent.process_messages()
def update_code(self, plan):
writer_agent = CodeWritingAgent(
plan,
self.root_path,
self.embedding_path,
openai_key=self.openai_key,
debug=True,
)
return writer_agent.process_messages()
def users_task_is_completed(self, summary_of_actions_taken):
return summary_of_actions_taken
| [
"You are an expert software engineer writing code in a repository. The user gives you a plan detailing how the code needs to be updated. You implement the code changes using functions. Ask clarifying questions.",
"You are an expert software engineer. You have a few helper agents that help you understand and write good software. You can call these agents by using the following functions:\n - understand_the_codebase_and_formulate_plan(query): Use this function to call an LLM agent to understand the codebase and formulate a plan of what files need to be updated and how they need to be updated. Also use this function to answer general questions about the codebase. The input should be a query about the codebase.\n - update_code(plan): Use this function to call an LLM agent to update the code in the repository. The input should be a plan of what files need to be updated and how they need to be updated.\nUse the two llm agents to complete the user task. Always understand the codebase first and follow the existing coding practices\n**DO NOT** respond to the user directly. Use the functions instead.\n"
] |
2024-01-10 | shruti222patel/repo-gpt | src~repo_gpt~code_manager~code_processor.py | import logging
from itertools import islice
from typing import List
import numpy as np
import pandas as pd
import tiktoken
from tqdm import tqdm
from ..console import verbose_print
from ..file_handler.abstract_handler import ParsedCode
from ..openai_service import OpenAIService, tokens_from_string
logger = logging.getLogger(__name__)
class CodeProcessor:
def __init__(self, code_root, openai_service: OpenAIService = None):
# Todo: add code root
self.code_root = code_root
self.openai_service = openai_service if openai_service else OpenAIService()
def process(self, code_blocks: List[ParsedCode]):
if len(code_blocks) == 0:
logger.verbose_info("No code blocks to process")
return None
df = pd.DataFrame(code_blocks)
logger.verbose_info(
f"Generating openai embeddings for {len(df)} code blocks. This may take a while because of rate limiting..."
)
def len_safe_get_embedding(text):
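            """Split long text into token chunks within the embedding limit, embed each, then return the length-weighted, L2-normalized average."""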
max_tokens = 8191
encoding_name = "cl100k_base"
chunk_embeddings = []
chunk_lens = []
for chunk in CodeProcessor._chunked_tokens(
text, encoding_name=encoding_name, chunk_length=max_tokens
):
chunk_embeddings.append(self.openai_service.get_embedding(chunk))
chunk_lens.append(len(chunk))
chunk_embedding = np.average(chunk_embeddings, axis=0, weights=chunk_lens)
return chunk_embedding / np.linalg.norm(
chunk_embedding
) # normalizes length to 1
if logger.getEffectiveLevel() < logging.INFO:
tqdm.pandas()
df["code_embedding"] = df["code"].progress_apply(len_safe_get_embedding)
else:
df["code_embedding"] = df["code"].apply(len_safe_get_embedding)
return df
@staticmethod
def _batched(iterable, n):
"""Batch data into tuples of length n. The last batch may be shorter."""
# batched('ABCDEFG', 3) --> ABC DEF G
if n < 1:
raise ValueError("n must be at least one")
it = iter(iterable)
while batch := tuple(islice(it, n)):
yield batch
@staticmethod
def _chunked_tokens(text, encoding_name, chunk_length):
encoding = tiktoken.get_encoding(encoding_name)
tokens = encoding.encode(text)
chunks_iterator = CodeProcessor._batched(tokens, chunk_length)
yield from chunks_iterator
| [] |
2024-01-10 | shruti222patel/repo-gpt | src~repo_gpt~agents~simple_memory_store.py | import json
import logging
import openai
import tiktoken
from tenacity import ( # for exponential backoff
retry,
stop_after_attempt,
wait_random_exponential,
)
from repo_gpt.openai_service import num_tokens_from_messages, num_tokens_from_string
class MemoryStore:
summary_prompt = """*Briefly* summarize this partial conversation about programming.
Include less detail about older parts and more detail about the most recent messages.
Start a new paragraph every time the topic changes!
This is only part of a longer conversation so *DO NOT* conclude the summary with language like "Finally, ...". Because the conversation continues after the summary.
The summary *MUST* include the function names, libraries, packages that are being discussed.
The summary *MUST* include the filenames that are being referenced!
The summaries *MUST NOT* include ```...``` fenced code blocks!
Phrase the summary with the USER in first person, telling the ASSISTANT about the conversation.
Write *as* the user.
The user should refer to the assistant as *you*.
Start the summary with "I asked you...".
"""
SUMMARY_MODEL = "gpt-3.5-turbo-16k-0613"
def __init__(
self,
system_prompt,
user_task,
functions=[],
threshold=4000,
summary_model=SUMMARY_MODEL,
):
self.messages = []
self.threshold = threshold
self.summary_model = summary_model
self.system_prompt = system_prompt
self.user_task = user_task
self._initialize_messages()
self.functions = functions
def _initialize_messages(self):
initial_messages = [
{"role": "system", "content": self.system_prompt},
{"role": "user", "content": self.user_task},
]
self.messages = initial_messages
def add_message(self, message):
self.messages.append(message)
if self._count_messages_tokens() >= self.threshold:
self.compress_messages()
def get_messages(self):
return self.messages
def _count_messages_tokens(self):
return num_tokens_from_messages(
self.messages, "gpt-4"
) + num_tokens_from_string(json.dumps(self.functions), "gpt-4")
# @retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
def get_formatted_messages(self):
# output = StringIO()
# pprint.pprint(self.messages, stream=output)
# formatted_messages = output.getvalue()
# formatted_messages = json.dumps(self.messages)
formatted_messages = []
for message in self.messages:
if "function_call" in message:
# Message to call a function
formatted_messages.append(
f"calling function {message['function_call']['name']}({str(message['function_call']['arguments'])})"
)
elif "name" in message:
# Message with function results
formatted_messages.append(
f"function {message['name']} returned: {message['content']}"
)
else:
formatted_messages.append(f"{message['role']}: {message['content']}")
return "\n".join(formatted_messages)
# return "test"
def compress_messages(self):
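        """Summarize the conversation so far and reset the message list so the summary replaces the full history."""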
# TODO: use something intelligent like semantic search possibly to select relevant messages
summary_messages = [
{
"role": "system",
"content": f"You are an expert technical writer.",
},
{
"role": "user",
"content": f"{self.summary_prompt}\n{self.get_formatted_messages()}",
},
]
try:
response = openai.ChatCompletion.create(
model=self.SUMMARY_MODEL, messages=summary_messages
)
logging.debug(response)
assistant_message = response["choices"][0]["message"]
logging.debug(assistant_message)
self._initialize_messages()
assistant_message.role = "user"
self.messages.append(assistant_message)
return response
except Exception as e:
logging.error("Unable to generate ChatCompletion response")
logging.error(f"Exception: {e}")
raise
| [
"Finally, ...",
"You are an expert technical writer.",
"*Briefly* summarize this partial conversation about programming.\n Include less detail about older parts and more detail about the most recent messages.\n Start a new paragraph every time the topic changes!\n\n This is only part of a longer conversation so *DO NOT* conclude the summary with language like \"Finally, ...\". Because the conversation continues after the summary.\n The summary *MUST* include the function names, libraries, packages that are being discussed.\n The summary *MUST* include the filenames that are being referenced!\n The summaries *MUST NOT* include ```...``` fenced code blocks!\n\n Phrase the summary with the USER in first person, telling the ASSISTANT about the conversation.\n Write *as* the user.\n The user should refer to the assistant as *you*.\n Start the summary with \"I asked you...\".\n ",
"I asked you..."
] |
2024-01-10 | shruti222patel/repo-gpt | src~repo_gpt~code_manager~code_manager.py | import logging
import os
import pickle
from pathlib import Path
import pandas as pd
from tqdm import tqdm
from ..console import verbose_print
from ..openai_service import OpenAIService
from .code_dir_extractor import CodeDirectoryExtractor
from .code_processor import CodeProcessor
logger = logging.getLogger(__name__)
class CodeManager:
def __init__(
self,
output_filepath: Path,
root_directory: Path = None,
openai_service: OpenAIService = None,
):
self.root_directory = root_directory
self.output_filepath = output_filepath
self.openai_service = (
openai_service if openai_service is not None else OpenAIService()
)
self.code_processor = CodeProcessor(self.root_directory, openai_service)
self.code_df = self.load_code_dataframe()
self.directory_extractor = CodeDirectoryExtractor(
self.root_directory, self.output_filepath, self.code_df
)
def display_directory_structure(self):
structured_output = []
for current_path, directories, files in os.walk(self.root_directory):
depth = current_path.replace(str(self.root_directory), "").count(os.sep)
indent = " " * (depth)
structured_output.append(f"{indent}/{os.path.basename(current_path)}")
sub_indent = " " * (depth + 1)
for file in sorted(files):
structured_output.append(f"{sub_indent}{file}")
return "\n".join(structured_output)
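# load_code_dataframe() restores any previously computed embeddings from the pickle file
# into a DataFrame (or returns None on a first run), so earlier results can be reused
# where possible.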
def load_code_dataframe(self):
dataframe = None
if os.path.exists(self.output_filepath):
with open(self.output_filepath, "rb") as file:
loaded_data = pickle.load(file)
dataframe = pd.DataFrame(loaded_data)
return dataframe
def setup(self):
self._extract_process_and_save_code()
logger.verbose_info("All done! ✨ 🦄 ✨")
def _store_code_dataframe(self, dataframe):
output_directory = Path(self.output_filepath).parent
if not output_directory.exists():
output_directory.mkdir(parents=True)
print(f"Directory created: {output_directory}")
# Save DataFrame as a pickle file
with open(self.output_filepath, "wb") as file:
pickle.dump(dataframe, file)
def _extract_process_and_save_code(self):
(
extracted_code_blocks,
outdated_checksums,
) = self.directory_extractor.extract_code_blocks_from_files()
processed_dataframe = self.code_processor.process(extracted_code_blocks)
updated_df = pd.concat([self.code_df, processed_dataframe], ignore_index=True)
# Remove checksums of updated code
updated_df = updated_df[~updated_df["file_checksum"].isin(outdated_checksums)]
self._store_code_dataframe(updated_df)
| [] |
2024-01-10 | shruti222patel/repo-gpt | src~repo_gpt~test_generator.py | import os
import openai as openai
from .code_manager.abstract_extractor import LanguageHandler
from .openai_service import GPT_3_MODELS, GPT_4_MODELS, num_tokens_from_messages
class TestGenerator:
TEMPERATURE = 0.4 # temperature = 0 can sometimes get stuck in repetitive loops, so we use 0.4
def __init__(
self,
function_to_test: str,
language: str,
unit_test_package: str,
debug: bool = False,
approx_min_cases_to_cover: int = 7,
reruns_if_fail: int = 1,
use_gpt_4: bool = False,
openai_api_key: str = None,
):
openai.api_key = (
openai_api_key if openai_api_key else os.environ["OPENAI_API_KEY"]
)
self.messages = []
self.language = language
self.unit_test_package = unit_test_package
self.function_to_test = function_to_test
self.debug = debug
self.approx_min_cases_to_cover = approx_min_cases_to_cover
self.reruns_if_fail = reruns_if_fail
self.code_handler = LanguageHandler[language.upper()].value()
self.model_set = GPT_4_MODELS if use_gpt_4 else GPT_3_MODELS
def create_gpt_message(self, role: str, content: str) -> dict:
message = {"role": role, "content": content}
if role == "system":
messages_without_sys_message = [
m for m in self.messages if m["role"] != "system"
]
self.messages = [message] + messages_without_sys_message
else:
self.messages.append(message)
color_prefix_by_role = {
"system": "\033[0m", # gray
"user": "\033[0m", # gray
"assistant": "\033[92m", # green
}
def print_messages(self, messages) -> None:
"""Prints messages sent to or from GPT."""
for message in messages:
role = message["role"]
color_prefix = self.color_prefix_by_role[role]
content = message["content"]
print(f"{color_prefix}\n[{role}]\n{content}")
def print_message_delta(self, delta) -> None:
"""Prints a chunk of messages streamed back from GPT."""
if "role" in delta:
role = delta["role"]
color_prefix = self.color_prefix_by_role[role]
print(f"{color_prefix}\n[{role}]\n", end="")
elif "content" in delta:
content = delta["content"]
print(content, end="")
else:
pass
def get_explanation_of_function(self) -> str:
self.create_gpt_message(
"system",
f"You are a world-class {self.language} developer with an eagle eye for unintended bugs and edge cases. ...",
)
self.create_gpt_message(
"user",
f"""Please explain the following {self.language} function. Review what each element of the function is doing precisely and what the author's intentions may have been. Organize your explanation as a markdown-formatted, bulleted list.
```{self.language}
{self.function_to_test}
```""",
)
return self.generate_stream_response()
def get_assistant_stream_response(self, api_response: dict) -> str:
assistant_message = ""
for chunk in api_response:
delta = chunk["choices"][0]["delta"]
if self.debug:
self.print_message_delta(delta)
if "content" in delta:
assistant_message += delta["content"]
return assistant_message
def find_gpt3_model(self):
num_tokens = num_tokens_from_messages(self.messages)
for max_tokens, model in self.model_set.items():
if num_tokens < max_tokens:
return model
raise Exception(f"Too many tokens ({num_tokens}) for {model}")
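# Illustrative model selection (the token limits are assumptions, not taken from
# openai_service): if self.model_set were {4096: "gpt-3.5-turbo", 16384: "gpt-3.5-turbo-16k"},
# a 6,000-token conversation would skip the 4k entry and return the 16k model.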
def generate_stream_response(self) -> str:
model = self.find_gpt3_model()
response = openai.ChatCompletion.create(
model=model,
messages=self.messages,
temperature=self.TEMPERATURE,
stream=True,
)
assistant_message = self.get_assistant_stream_response(response)
self.create_gpt_message("assistant", assistant_message)
return assistant_message
def generate_plan(self) -> str:
self.create_gpt_message(
"user",
f"""A good unit test suite should aim to:
- Test the function's behavior for a wide range of possible inputs
- Test edge cases that the author may not have foreseen
- Take advantage of the features of `{self.unit_test_package}` to make the tests easy to write and maintain
- Be easy to read and understand, with clean code and descriptive names
- Be deterministic, so that the tests always pass or fail in the same way
To help unit test the function above, list diverse scenarios that the function should be able to handle (and under each scenario, include a few examples as sub-bullets).""",
)
plan = self.generate_stream_response()
# Set if further elaboration is needed
num_bullets = max(plan.count("\n-"), plan.count("\n*"))
self.elaboration_needed = num_bullets < self.approx_min_cases_to_cover
return plan
def generate_elaboration(self) -> str:
self.create_gpt_message(
"user",
f"""In addition to those scenarios above, list a few rare or unexpected edge cases (and as before, under each edge case, include a few examples as sub-bullets).""",
)
return self.generate_stream_response()
def generate_unit_test(self) -> str:
package_comment = ""
if self.unit_test_package == "pytest":
package_comment = "# below, each test case is represented by a tuple passed to the @pytest.mark.parametrize decorator"
self.create_gpt_message(
"system",
"You are a world-class Python developer with an eagle eye for unintended bugs and edge cases. You write careful, accurate unit tests. When asked to reply only with code, you write all of your code in a single block.",
)
self.create_gpt_message(
"user",
f"""Using {self.language} and the `{self.unit_test_package}` package, write a suite of unit tests for the function, following the cases above. Include helpful comments to explain each line. Reply only with code, formatted as follows:
```{self.language}
# imports
import {self.unit_test_package} # used for our unit tests
{{insert other imports as needed}}
# function to test
{self.function_to_test}
# unit tests
{package_comment}
{{insert unit test code here}}
```""",
)
return self.generate_stream_response()
def unit_tests_from_function(
self,
) -> str:
self.get_explanation_of_function()
self.generate_plan()
if self.elaboration_needed:
self.generate_elaboration()
generated_tests = self.generate_unit_test()
# handle errors
# check the output for errors
cleaned_tests = (
generated_tests.split(f"```{self.language}")[1].split("```")[0].strip()
)
try:
self.code_handler.is_valid_code(
cleaned_tests
) # TODO: use tree-sitter for validation instead
except SyntaxError as e:
if self.reruns_if_fail > 0:
self.reruns_if_fail -= 1
return self.unit_tests_from_function()
raise
return cleaned_tests
| [] |
2024-01-10 | shruti222patel/repo-gpt | src~repo_gpt~agents~autogen~repo_qna.py | import logging
import os
import re
from pathlib import Path
import autogen
import pytest
from repo_gpt.agents.autogen.user_proxy_function_call_agent import (
UserProxyFunctionCallAgent,
)
from repo_gpt.agents.repo_comprehender import get_relative_path_directory_structure
from repo_gpt.code_manager.abstract_extractor import AbstractCodeExtractor
from repo_gpt.file_handler.generic_code_file_handler import PythonFileHandler
from repo_gpt.openai_service import OpenAIService
from repo_gpt.search_service import SearchService, convert_search_df_to_json
logger = logging.getLogger(__name__)
from repo_gpt.logging_config import VERBOSE_INFO, configure_logging
class RepoQnA:
def __init__(self, question, root_path, embedding_path=None, openai_api_key=None):
self.question = question
self.question_answer = None
self.is_valid_answer = None
self.validator_explanation = None
self.openai_api_key = openai_api_key
self.root_path = Path(root_path)
self.embedding_path = (
Path(embedding_path)
if embedding_path is not None
else self.root_path / ".repo_gpt/code_embeddings.pkl"
)
self.openai_service = OpenAIService(openai_api_key)
self.search_service = SearchService(self.openai_service, self.embedding_path)
self.openai_api_key = (
openai_api_key if openai_api_key else os.environ["OPENAI_API_KEY"]
)
config_list = [
{
"model": "gpt-3.5-turbo-1106",
"api_key": self.openai_api_key,
}, # OpenAI API endpoint for gpt-3.5-turbo
]
self.config = self.create_function_augmented_config(config_list)
def create_code_librarian(self):
return autogen.AssistantAgent(
name="CodeRepo Librarian",
system_message="""You are a detail-oriented world-class software engineer. You specialize in answering questions about the user's codebase. You use the functions to search and understand the codebase.""",
llm_config=self.config,
)
def is_answer_to_question(self, msg):
is_termination_msg = msg.get("content", "") and msg.get(
"content", ""
).rstrip().startswith("ANSWER:")
if is_termination_msg:
self.question_answer = msg.get("content", "").replace("ANSWER:", "").strip()
return is_termination_msg
def is_answer_to_question(self, msg):
content = msg.get("content", "")
if not content:
return False
# Check if the content contains the "ANSWER:" marker
if "ANSWER:" in content:
# Find the position where "ANSWER:" ends
answer_start_index = content.find("ANSWER:") + len("ANSWER:")
# Extract the content after "ANSWER:"
self.question_answer = content[answer_start_index:].strip()
# Return True since the content contains an answer
return True
# Return False if the content does not contain "ANSWER:"
return False
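# is_answer_correct() below parses validator replies shaped like this illustrative example:
#   ANSWER: False
#   EXPLANATION: The answer points at the wrong module.
# capturing the verdict and, when present, the explanation used for a retry round.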
def is_answer_correct(self, msg):
content = msg.get("content", "")
if not content:
return False
# Check for "ANSWER:" and capture the value after it
answer_pattern = r"^ANSWER:\s*(True|False|Unsure)"
answer_match = re.search(answer_pattern, content, re.IGNORECASE)
# If "ANSWER:" is found, extract the value and check for "EXPLANATION:"
if answer_match:
self.is_valid_answer = answer_match.group(1)
# Check for "EXPLANATION:" and capture the value after it
explanation_pattern = r"EXPLANATION:\s*(.*)"
explanation_match = re.search(explanation_pattern, content)
if explanation_match:
self.validator_explanation = explanation_match.group(1).strip()
return True
return False
def create_code_answer_validator(self):
return autogen.AssistantAgent(
name="CodeRepo Answer Validator",
system_message="""You are a detail-oriented world-class software engineer. You specialize in criticing answers to code repository questions. You do this by searching and understanding the codebase to check if the provided answer is correct. You use the functions to search and understand the codebase.""",
llm_config=self.config,
)
def create_function_augmented_config(self, config):
return {
"functions": [
{
"name": "semantic_search",
"description": "Use this function to search the entire codebase semantically. The input should be the search query string.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": f"""The semantic search query to use to search the code base.""",
}
},
"required": ["query"],
},
},
{
"name": "view_function_code",
"description": "Use this function to search for and view a function's code in the user's codebase. Input should be the name of the function you want to search for.",
"parameters": {
"type": "object",
"properties": {
"function_name": {
"type": "string",
"description": f"""The name of the function or its description.""",
}
},
"required": ["function_name"],
},
},
# {
# "name": "view_file_functions_and_classes",
# "description": "Use this function to retrieve a list of the functions and classes in a file from the user's codebase.",
# "parameters": {
# "type": "object",
# "properties": {
# "file_paths": {
# "type": "array",
# "items": {
# "type": "string",
# "description": "An array of one or more file paths of a file you want to retrieve functions and classes from. If a file doesn't exist, the function will return a string saying so.",
# },
# "description": f"""The file paths of the files you want to retrieve functions and classes to understand the user's task better. Below are the files within the user's repository:
# {get_relative_path_directory_structure("/Users/shrutipatel/projects/work/repo-gpt")}
# """,
# }
# },
# "required": ["file_paths"],
# },
# },
{
"name": "view_raw_file_contents",
"description": "Use this function to retrieve the raw contents of a file from the user's codebase.",
"parameters": {
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": f"""The file path of the file you want to retrieve the raw contents of. Below are the files within the user's repository:
{get_relative_path_directory_structure("/Users/shrutipatel/projects/work/repo-gpt")}
""",
}
},
"required": ["file_path"],
},
},
],
"config_list": config,
"request_timeout": 120,
}
def create_user_proxy(self, is_termination_msg):
return self.UserProxyAgent(
self.root_path,
self.embedding_path,
self.openai_api_key,
is_termination_msg,
)
def initiate_chat(self, validate_answer=False):
code_librarian = self.create_code_librarian()
user_proxy = self.create_user_proxy(self.is_answer_to_question)
user_proxy.initiate_chat(
code_librarian,
message=f"""{self.question}
Use the functions to read and understand the codebase. Then, once you have an answer, backed up by searching in the code, reply with:
```
ANSWER:<answer>
```
If you cannot answer the user's question, reply with:
```
ANSWER: I cannot answer this question.
```
""",
clear_history=True,
)
# print(self.question_answer)
if self.question_answer is not None and validate_answer:
# Validate answer
code_answer_validator = self.create_code_answer_validator()
validator_user_proxy = self.create_user_proxy(self.is_answer_correct)
validator_user_proxy.initiate_chat(
code_answer_validator,
message=f"""Verify the following question & answer are correct:
Q:{self.question}
A:{self.question_answer}
------------------
Use the functions to read and understand the codebase. Then, once you have an answer, backed up by searching in the code, reply with:
```
ANSWER:<True or False or Unsure>
EXPLANATION: <your explanation>
```
""",
clear_history=True,
)
# print(self.is_valid_answer)
# TODO add reflection
if not self.is_valid_answer:
user_proxy.initiate_chat(
code_librarian,
message=f"""{self.question}
This is the previous answer, which was incorrect:
{self.question_answer}
Here is an explanation of why the answer is incorrect:
{self.validator_explanation}
--------
Use the functions to read and understand the codebase. Then, once you have an answer, backed up by searching in the code, reply with:
```
ANSWER:<answer>
```
If you cannot answer the user's question, reply with:
```
ANSWER: I cannot answer this question.
```
""",
clear_history=True,
)
return self.question_answer
class UserProxyAgent(UserProxyFunctionCallAgent):
def __init__(
self, root_path, embedding_path, openai_api_key, is_termination_msg_func
):
self.openai_service = OpenAIService(openai_api_key)
self.root_path = root_path
self.embedding_path = embedding_path
self.search_service = SearchService(
self.openai_service, self.embedding_path
)
super().__init__(
name="You",
is_termination_msg=is_termination_msg_func,
human_input_mode="NEVER",
max_consecutive_auto_reply=3,
code_execution_config=False,
function_map={
# "view_file_functions_and_classes": self.view_file_functions_and_classes,
"view_raw_file_contents": self.view_raw_file_contents,
"view_function_code": self.view_function_code,
"semantic_search": self.semantic_search,
},
)
def view_function_code(self, function_name):
# logger.info(f"Reading the code for: {function_name}")
df = self.search_service.find_function_match(function_name)
if df is None or df.empty:
return "Not found."
else:
return convert_search_df_to_json(df, ["code"])
def semantic_search(self, query):
# logger.info(f"Searching the codebase for: {query}")
return convert_search_df_to_json(
self.search_service.semantic_search_similar_code(query)
)
# def view_file_functions_and_classes(self, file_paths):
# # logger.info(f"Skimming the code in: {file_paths}")
# results = []
# for file_path in file_paths:
# full_path = self.root_path / Path(file_path)
#
# if not full_path.exists():
# results.append(f"File not found: {file_path}")
# continue # Skip to the next iteration
# elif full_path.is_dir():
# results.append(
# f"This is not a file, but a directory, pass a filepath instead: {file_path}"
# )
# continue # Skip to the next iteration
#
# parsable_extensions = AbstractCodeExtractor.get_file_extensions_with_handlers()
# if full_path.suffix not in parsable_extensions:
# return f"Cannot parse file." # TODO just pass the full text instead
# file_handler = AbstractCodeExtractor.get_handler(file_path)()
# file_contents = file_handler.summarize_file(full_path)
#
# if "" == file_contents:
# results.append(f"File is empty: {file_path}")
# continue
#
# results.append(file_contents)
#
# return "\n".join(results)
def view_raw_file_contents(self, file_path):
partial_path = Path(file_path)
full_path = self.root_path / Path(file_path)
print(full_path)
path_to_read = None
if not full_path.exists() and not partial_path.exists():
return f"File not found: {file_path}"
elif full_path.exists() and not full_path.is_dir():
path_to_read = full_path
elif partial_path.exists() and not partial_path.is_dir():
path_to_read = partial_path
else:
return f"This is not a file, but a directory, pass a filepath instead: {file_path}"
with open(path_to_read, "r") as f:
return f.read()
def test():
# REPO_QUESTION = "How do I add a handler for a new language to the codebase?"
# REPO_QUESTION = "Where should I add new tests for a new Java file handler I'm writing?"
# REPO_QUESTION = "What is the framework used for writing tests?"
REPO_QUESTION = "How do I replace pandas with polars code?"
root_path = "/Users/shrutipatel/projects/work/repo-gpt/"
repo_qna = RepoQnA(REPO_QUESTION, root_path)
print(repo_qna.initiate_chat())
assert False
| [] |
2024-01-10 | shruti222patel/repo-gpt | src~repo_gpt~agents~repo_comprehender.py | # Refactored RepoUnderstandingAgent using the ParentAgent
import logging
import os
from pathlib import Path
from pathspec import PathSpec
from pathspec.patterns import GitWildMatchPattern
from tqdm import tqdm
from repo_gpt.agents.base_agent import BaseAgent
from repo_gpt.file_handler.generic_code_file_handler import PythonFileHandler
from repo_gpt.openai_service import OpenAIService
from repo_gpt.search_service import SearchService, convert_search_df_to_json
# Initialize the tqdm integration with pandas
logger = logging.getLogger(__name__)
def get_gitignore_spec(root_directory):
gitignore_file = os.path.join(root_directory, ".gitignore")
if not os.path.exists(gitignore_file):
return None
with open(gitignore_file, "r") as f:
spec = PathSpec.from_lines(GitWildMatchPattern, f)
return spec
def is_hidden(path):
# Check if a file or directory is hidden by checking if its name starts with a dot
return os.path.basename(path).startswith(".")
def get_indented_directory_structure(root_directory):
structured_output = []
gitignore_spec = get_gitignore_spec(root_directory)
for current_path, directories, files in os.walk(root_directory):
# Filter out hidden directories and those in gitignore
directories[:] = [
d
for d in directories
if not is_hidden(d)
and (
not gitignore_spec
or not gitignore_spec.match_file(os.path.join(current_path, d))
)
]
# Skip hidden directories in the main loop
if is_hidden(current_path):
continue
depth = current_path.replace(root_directory, "").count(os.sep)
indent = " " * depth
structured_output.append(f"{indent}/{os.path.basename(current_path)}")
sub_indent = " " * (depth + 1)
for file in sorted(files):
# Skip hidden files or those in gitignore
if not is_hidden(file) and (
not gitignore_spec
or not gitignore_spec.match_file(os.path.join(current_path, file))
):
structured_output.append(f"{sub_indent}{file}")
return "\n".join(structured_output)
def get_relative_path_directory_structure(root_directory):
structured_output = []
gitignore_spec = get_gitignore_spec(root_directory)
for current_path, directories, files in os.walk(root_directory):
# Filter out hidden directories and those in gitignore
directories[:] = [
d
for d in directories
if not is_hidden(d)
and (
not gitignore_spec
or not gitignore_spec.match_file(os.path.join(current_path, d))
)
]
# Skip hidden directories in the main loop
if is_hidden(current_path):
continue
# # Convert the current directory path to a relative path from the root directory
rel_dir = os.path.relpath(current_path, root_directory)
# # Append the relative directory path to structured_output
# structured_output.append(rel_dir if rel_dir != "." else "")
for file in sorted(files):
# Skip hidden files or those in gitignore
if not is_hidden(file) and (
not gitignore_spec
or not gitignore_spec.match_file(os.path.join(current_path, file))
):
# Combine the relative directory path with the file name to get the relative file path
rel_file_path = os.path.join(rel_dir, file)
structured_output.append(rel_file_path)
return structured_output
def get_relative_path_directory_structure_string(root_directory):
return "\n".join(get_relative_path_directory_structure(root_directory))
# print(get_relative_path_directory_structure_string("/Users/shrutipatel/projects/work/repo-gpt"))
class RepoUnderstandingAgent(BaseAgent):
system_prompt = """You are an expert software engineer on a specific code repository. Users ask you how they can implement something in their codebase. You first use your tools to search and understand the codebase and then figure out how to implement the users' task in the repository.
**DO NOT** communicate with the user directly. Use the functions instead.
"""
def __init__(
self,
user_task,
root_path,
system_prompt=system_prompt,
threshold=10,
debug=False,
openai_key=None,
):
self.system_prompt = system_prompt
super().__init__(
user_task,
"create_plan_to_complete_user_task",
system_prompt,
threshold,
debug,
) # Call ParentAgent constructor
self.root_path = root_path
self.embedding_path = self.root_path / ".repo_gpt/code_embeddings.pkl"
self.openai_service = (
OpenAIService() if not openai_key else OpenAIService(openai_key)
)
self.search_service = SearchService(self.openai_service, self.embedding_path)
self.pythonfilehandler = (
PythonFileHandler()
) # TODO: update to handle more than python files (all except SQL)
self.functions = self._initialize_functions()
def _initialize_functions(self):
# Define function details
return [
{
"name": "semantic_search",
"description": "Use this function to search the entire codebase semantically. The input should be the search query string.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": f"""
The semantic search query to use to search the code base.
""",
}
},
"required": ["query"],
},
},
{
"name": "view_function_code",
"description": "Use this function to search for and view a function's code in the user's codebase. Input should be the name of the function you want to search for. An empty response means the given files don't exist.",
"parameters": {
"type": "object",
"properties": {
"function_name": {
"type": "string",
"description": f"""
The name of the function or its description.
""",
}
},
"required": ["function_name"],
},
},
{
"name": "view_file_functions_and_classes",
"description": "Use this function to retrieve a list of the functions and classes in a file from the user's codebase. An empty response means the given files don't exist.",
"parameters": {
"type": "object",
"properties": {
"file_paths": {
"type": "array",
"items": {
"type": "string",
"description": "An array of one or more file paths of a file you want to retrieve functions and classes from. If a file doesn't exist, the function will return a string saying so.",
},
"description": f"""
The file paths of the files you want to retrieve functions and classes for to better understand the user's task. Below are the files within the user's repository:
{get_relative_path_directory_structure("/Users/shrutipatel/projects/work/repo-gpt")}
""",
}
},
"required": ["file_paths"],
},
},
{
"name": "create_plan_to_complete_user_task",
"description": "Use this function when you understand the user's task and have a detailed plan ready for completing the user's task. The input should be a step-by-step plan on how to complete the user's task. It can include things like 'Create a new file with a given file path', 'Add the given code to the file', etc.",
"parameters": {
"type": "object",
"properties": {
"plan": {
"type": "string",
"description": f"""
A step-by-step plan on how to complete the user's task. It can include things like "Create a new file with a given file path", "Add the given code to the file", etc.
""",
}
},
"required": ["plan"],
},
},
]
def view_function_code(self, function_name):
logger.info(f"Reading the code for: {function_name}")
functions_df, classes_df = self.search_service.find_function_match(
function_name
)
if (classes_df is None or classes_df.empty) and (
functions_df is None or functions_df.empty
):
return ""
elif functions_df is None or functions_df.empty:
return convert_search_df_to_json(classes_df)
elif classes_df is None or classes_df.empty:
return convert_search_df_to_json(functions_df)
else:
return convert_search_df_to_json(functions_df)
def semantic_search(self, query):
logger.info(f"Searching the codebase for: {query}")
return convert_search_df_to_json(
self.search_service.semantic_search_similar_code(query)
)
def view_file_functions_and_classes(self, file_paths):
logger.info(f"Skimming the code in: {file_paths}")
results = []
for file_path in file_paths:
full_path = self.root_path / Path(file_path)
if not full_path.exists():
results.append(f"File not found: {file_path}")
continue # Skip to the next iteration
elif full_path.is_dir():
results.append(
f"This is not a file, but a directory, pass a filepath instead: {file_path}"
)
continue # Skip to the next iteration
# TODO select the correct filehandler and then summarize file
results.append(self.pythonfilehandler.summarize_file(full_path))
return "\n".join(results)
def create_plan_to_complete_user_task(self, plan):
self.append_function_result_message("create_plan_to_complete_user_task", plan)
return plan
| [
"You are an expert software engineer on a specific code repository. Users ask you how they can implement something in their codebase. You first use your tools to search and understand the codebase and then figure out how to implement the users' task in the repository.\n **DO NOT** communicate with the user directly. Use the functions instead.\n "
] |
2024-01-10 | shruti222patel/repo-gpt | src~repo_gpt~agents~code_writer.py | import logging
from pathlib import Path
from repo_gpt.agents.base_agent import BaseAgent
from repo_gpt.file_handler.generic_code_file_handler import PythonFileHandler
from repo_gpt.openai_service import OpenAIService
from repo_gpt.search_service import SearchService
logger = logging.getLogger(__name__)
class CodeWritingAgent(BaseAgent):
system_prompt = """You are an expert software engineer writing code in a repository. The user gives you a plan detailing how the code needs to be updated. You implement the code changes using functions. Ask clarifying questions.
**DO NOT** respond to the user directly. Use the functions instead.
"""
def __init__(
self,
user_task,
root_path,
embedding_path,
system_prompt=system_prompt,
threshold=10,
debug=False,
openai_key=None,
):
self.system_prompt = system_prompt if system_prompt else self.system_prompt
super().__init__(
user_task, "completed_all_code_updates", system_prompt, threshold, debug
) # Call ParentAgent constructor
self.root_path = root_path
self.embedding_path = embedding_path
self.openai_service = (
OpenAIService() if not openai_key else OpenAIService(openai_key)
)
self.search_service = SearchService(self.openai_service, self.embedding_path)
self.codefilehandler = (
PythonFileHandler()
) # TODO: update to handle more than python files (all except sql)
self.functions = self._initialize_functions()
def _initialize_functions(self):
return [
{
"name": "create_file",
"description": "Create a new file with the provided content.",
"parameters": {
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "Path to the new file to be created.",
},
"content": {
"type": "string",
"description": "Content to write in the new file.",
},
},
"required": ["file_path", "content"],
},
},
{
"name": "append_to_file",
"description": "Append content to an existing file.",
"parameters": {
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "Path to the file to be updated.",
},
"content": {
"type": "string",
"description": "Content to append to the file.",
},
},
"required": ["file_path", "content"],
},
},
{
"name": "completed_all_code_updates",
"description": "Call this function when all the code updates are completed.",
"parameters": {
"type": "object",
"properties": {
"code_changes": {
"type": "string",
"description": "Enumeration of all the changes that were made to the code.",
}
},
"required": ["code_changes"],
},
},
]
def completed_all_code_updates(self, code_changes):
self.append_function_result_message("completed_all_code_updates", code_changes)
return code_changes
def create_file(self, file_path, content):
"""
Create a new file with the provided content.
Args:
- file_path (str): Path to the new file to be created.
- content (str): Content to write in the new file.
Returns:
- str: Success or error message.
"""
full_path = self.root_path / Path(file_path)
# Check if file already exists
if full_path.exists():
return (
f"File {file_path} already exists. To update it, use append_to_file()."
)
with open(full_path, "w") as f:
f.write(content)
return f"File {file_path} has been created successfully."
def append_to_file(self, file_path, content):
"""
Append content to an existing file.
Args:
- file_path (str): Path to the file to be updated.
- content (str): Content to append in the file.
Returns:
- str: Success or error message.
"""
full_path = self.root_path / Path(file_path)
# Check if file exists
if not full_path.exists():
return f"File {file_path} does not exist. To create it, use create_file()."
with open(full_path, "a") as f:
f.write(content)
return f"Content has been appended to {file_path} successfully."
| [
"You are an expert software engineer writing code in a repository. The user gives you a plan detailing how the code needs to be updated. You implement the code changes using functions. Ask clarifying questions.\n **DO NOT** respond to the user directly. Use the functions instead.\n ",
"{'type': 'string', 'description': 'Content to append to the file.'}",
"{'type': 'string', 'description': 'Content to write in the new file.'}"
] |
2024-01-10 | shruti222patel/repo-gpt | src~repo_gpt~agents~autogen~user_proxy_function_call_agent.py | import json
import logging
import autogen
try:
from termcolor import colored
except ImportError:
def colored(x, *args, **kwargs):
return x
logger = logging.getLogger(__name__)
class UserProxyFunctionCallAgent(autogen.UserProxyAgent):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def execute_function(self, func_call):
"""Execute a function call and return the result.
Override this function to modify the way to execute a function call.
Args:
func_call: a dictionary extracted from openai message at key "function_call" with keys "name" and "arguments".
Returns:
A tuple of (is_exec_success, result_dict).
is_exec_success (boolean): whether the execution is successful.
result_dict: a dictionary with keys "name", "role", and "content". Value of "role" is "function".
"""
func_name = func_call.get("name", "")
func = self._function_map.get(func_name, None)
if hasattr(func, "__self__"):
func = getattr(self, func_name, None)
is_exec_success = False
if func is not None:
# Extract arguments from a json-like string and put it into a dict.
input_string = self._format_json_str(func_call.get("arguments", "{}"))
try:
arguments = json.loads(input_string)
except json.JSONDecodeError as e:
arguments = None
content = f"Error: {e}\n You argument should follow json format."
# Try to execute the function
if arguments is not None:
print(
colored(f"\n>>>>>>>> EXECUTING FUNCTION {func_name}...", "magenta"),
flush=True,
)
try:
content = func(**arguments)
is_exec_success = True
except Exception as e:
content = f"Error: {e}"
print(e)
raise e
# print(f"Finished executing, here is the content: {content}")
# print(f"is_exec_success: {is_exec_success}")
else:
content = f"Error: Function {func_name} not found."
return is_exec_success, {
"name": func_name,
"role": "function",
"content": str(content),
}
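# Illustrative call (not from a real run): execute_function(
#     {"name": "semantic_search", "arguments": '{"query": "embedding storage"}'})
# would return (True, {"name": "semantic_search", "role": "function", "content": "..."}),
# while a malformed arguments string yields is_exec_success=False and a JSON error message.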
| [] |
2024-01-10 | shruti222patel/repo-gpt | src~repo_gpt~vscode_prompt_service.py | import json
from dataclasses import asdict, dataclass
from enum import Enum
from typing import Union
from repo_gpt.openai_service import OpenAIService
from repo_gpt.prompt_service import PromptService
from repo_gpt.search_service import SearchService
class Status(Enum):
SUCCESS = "SUCCESS"
ERROR = "ERROR"
@dataclass()
class VscodeMessage:
# Note this will be read using Typescript in Vscode
status: str
code: Union[str, None]
message: Union[str, None]
error: Union[str, None]
def __init__(
self,
status: Status = Status.ERROR,
code: Union[str, None] = None,
message: Union[str, None] = None,
error: Union[Exception, str, None] = None,
):
self.status = status.value
self.code = code
self.message = message
self.error = str(error) if isinstance(error, Exception) else error
def __str__(self):
"""
This ensures printed strings are valid JSON, which is necessary for the Vscode extension to read the output.
:return:
"""
return json.dumps(asdict(self))
class VscodePromptService(PromptService):
def __init__(
self,
openai_service: OpenAIService,
language: str,
search_service: SearchService = None,
):
super().__init__(openai_service, language)
self.search_service = search_service
def refactor_code(
self, input_code_file_path: str, additional_instructions: str = ""
):
# try:
with open(input_code_file_path, "r") as f:
code = f.read()
super().refactor_code(code, additional_instructions)
def query_code(self, question: str):
similar_code_df = self.search_service.semantic_search_similar_code(question)
code = "\n".join(similar_code_df["code"].tolist())
super().query_code(question, code)
| [] |
2024-01-10 | shruti222patel/repo-gpt | src~repo_gpt~agents~base_agent.py | import inspect
import json
import logging
from abc import ABC, abstractmethod
import openai
import tiktoken
from tenacity import ( # for exponential backoff
retry,
stop_after_attempt,
wait_random_exponential,
)
from repo_gpt.agents.simple_memory_store import MemoryStore
logger = logging.getLogger(__name__)
class BaseAgent(ABC):
GPT_MODEL = "gpt-3.5-turbo-0613" # gpt-4-0613
def __init__(
self,
user_task,
terminating_function_call_name,
system_prompt,
threshold=10,
debug=False,
):
self.terminating_function_call_name = terminating_function_call_name
self.functions = self._initialize_functions()
self.memory_store = MemoryStore(system_prompt, user_task, self.functions)
self.user_task = user_task
self.system_prompt = system_prompt
self.threshold = threshold
self.debug = debug
@abstractmethod
def _initialize_functions(self):
"""
Must be implemented by subclasses to initialize function-related attributes.
"""
pass
def _parse_arguments(self, function_call):
return json.loads(function_call["arguments"])
def _append_message(self, message):
self.memory_store.add_message(message)
def compress_messages(self):
self.memory_store.compress_messages()
def execute_function_call(self, message):
function_name = message["function_call"]["name"]
args = self._parse_arguments(message["function_call"])
func = getattr(self, function_name, None)
if not func:
return f"Error: function {function_name} does not exist"
# Filter out args to only pass those that the function accepts
accepted_args = inspect.signature(func).parameters.keys()
filtered_args = {
key: value for key, value in args.items() if key in accepted_args
}
return func(**filtered_args)
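# Example of the signature filtering above (values are illustrative): if the model supplies
# arguments {"query": "auth flow", "extra": 1} for a method defined as semantic_search(self, query),
# only {"query": "auth flow"} is forwarded, so unexpected keys never raise a TypeError.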
# @retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
def chat_completion_request(self, function_call="auto", model=GPT_MODEL):
try:
response = openai.ChatCompletion.create(
model=model,
messages=self.memory_store.messages,
functions=self.functions,
function_call=function_call,
)
return response
except Exception as e:
logger.error("Unable to generate ChatCompletion response")
logger.error(f"Exception: {e}")
raise
def append_function_result_message(self, function_call_name, results):
self._append_message(
{"role": "function", "content": results, "name": function_call_name}
)
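# process_messages() is the main agent loop: it keeps requesting completions and executing
# any returned function calls until the terminating function is reached, and raises if the
# iteration threshold is hit first.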
def process_messages(self):
# TODO: make ending function name settable OR move this into the childclass
iter_count = 0
function_call_name = ""
results = ""
while (
iter_count < self.threshold
and function_call_name != self.terminating_function_call_name
):
chat_response = self.chat_completion_request()
assistant_message = chat_response["choices"][0]["message"]
self._append_message(assistant_message.to_dict_recursive())
logger.debug(assistant_message)
if "function_call" in assistant_message:
results = self.execute_function_call(assistant_message)
function_call_name = assistant_message["function_call"]["name"]
self.append_function_result_message(function_call_name, results)
else:
self._append_message({"role": "user", "content": "Continue"})
iter_count += 1
if function_call_name == self.terminating_function_call_name:
return results
raise Exception(
"I had to stop the search loop before plan for formulated because I reached the end of my allotted function calls"
)
| [
"Continue"
] |
2024-01-10 | dborodin836/TF2-GPTChatBot | gui~log_window.py | import os
import sys
import time
import tkinter as tk
from tkinter.ttk import Checkbutton
import openai
import ttkbootstrap as ttk
from ttkbootstrap import Style
from services.chatgpt import send_gpt_completion_request
from utils.bans import ban_player, list_banned_players, unban_player
from utils.bot_state import start_bot, stop_bot
from utils.chat import PROMPTS_QUEUE
from utils.commands import print_help_command
from utils.logs import get_logger
PROMPT_PLACEHOLDER = "Type your commands here... Or start with 'help' command"
gui_logger = get_logger("gui")
main_logger = get_logger("main")
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception as e:
main_logger.warning(f"Running from source. [{e}]")
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
class LogWindow(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.log_text = None
self.cmd_line = None
self.create_widgets()
self.master.title("TF2-GPTChatBot")
self.master.resizable(False, False)
self.master.iconbitmap(resource_path('icon.ico'))
# Set the style to "simplex"
style = Style(theme='cosmo')
style.configure(".", font=("TkDefaultFont", 11), foreground="black")
style.configure("TButton", padding=6, relief="flat")
style.configure("TEntry", padding=6)
style.configure("TFrame", background="white")
def create_widgets(self):
# Add a Text widget to the window for displaying logs
self.log_text = ttk.Text(self, height=20, width=100, state="disabled")
self.log_text.grid(row=0, column=0, padx=10, pady=10, columnspan=2)
# Add another Text widget below the log_text widget for displaying additional text
self.cmd_line = ttk.Text(self, height=1, width=89)
self.cmd_line.grid(row=1, column=0, padx=10, pady=10)
self.toggle_var = tk.BooleanVar(value=True)
self.toggle_button = Checkbutton(
self,
text=" Stick \n Logs",
variable=self.toggle_var,
bootstyle="round-toggle",
command=lambda: self.log_text.see(tk.END) if self.toggle_var.get() else None
)
self.toggle_button.grid(row=1, column=1, padx=(0, 18))
self.cmd_line.bind("<Return>", self.handle_commands)
# Add a placeholder to the additional_text widget
self.cmd_line.insert("1.0", PROMPT_PLACEHOLDER)
# Binds to make the placeholder work
self.cmd_line.bind("<FocusIn>", self.handle_additional_text_focus_in)
self.cmd_line.bind("<FocusOut>", self.handle_additional_text_focus_out)
def update_logs(self, message):
self.log_text.config(state="normal")
self.log_text.insert(tk.END, f"{message}")
self.log_text.config(state="disabled")
if self.toggle_var.get():
self.log_text.see(tk.END) # Scroll to the end of the text widget
def exit_program(self):
self.master.destroy()
def handle_commands(self, event):
text = self.cmd_line.get("1.0", tk.END).strip()
if text.strip == "":
return
gui_logger.info(f'> {text}')
handle_gui_console_commands(text)
# Clear the additional_text widget after the function is executed
self.cmd_line.delete("1.0", tk.END)
def handle_additional_text_focus_in(self, event):
# Clear the placeholder text when the additional_text widget receives focus
if self.cmd_line.get("1.0", tk.END).strip() == PROMPT_PLACEHOLDER:
self.cmd_line.delete("1.0", tk.END)
def handle_additional_text_focus_out(self, event):
# Show the placeholder text when the additional_text widget loses focus and is empty
if not self.cmd_line.get("1.0", tk.END).strip():
self.cmd_line.insert("1.0", PROMPT_PLACEHOLDER)
class CustomOutput:
def __init__(self, window: LogWindow):
self.window = window
def write(self, message):
self.window.update_logs(message)
def flush(self):
...
def handle_gui_console_commands(command: str) -> None:
if command.startswith("stop"):
stop_bot()
elif command.startswith("start"):
start_bot()
elif command.startswith("quit"):
sys.exit(0)
elif command.startswith("ban "):
name = command.removeprefix("ban ").strip()
ban_player(name)
elif command.startswith("unban "):
name = command.removeprefix("unban ").strip()
unban_player(name)
elif command.startswith("gpt3 "):
prompt = command.removeprefix("gpt3 ").strip()
PROMPTS_QUEUE.put(prompt)
elif command.startswith("bans"):
list_banned_players()
elif command.startswith("help"):
print_help_command()
def gpt3_cmd_handler() -> None:
while True:
if PROMPTS_QUEUE.qsize() != 0:
prompt = PROMPTS_QUEUE.get()
try:
response = send_gpt_completion_request([{"role": "user", "content": prompt}], "admin",
model="gpt-3.5-turbo")
gui_logger.info(f"GPT3> {response}")
except openai.error.RateLimitError:
gui_logger.warning("Rate Limited! Try again later.")
except Exception as e:
main_logger.error(f"Unhandled exception from request from gui. [{e}]")
else:
time.sleep(2)
| [
"gpt3 ",
"Type your commands here... Or start with 'help' command"
] |
2024-01-10 | bhargavkakadiya/llm-app | app_html.py | import sys
from langchain.llms import OpenAI
from langchain.document_loaders import WebBaseLoader
from langchain.chains.summarize import load_summarize_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores.faiss import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts.prompt import PromptTemplate
from langchain.llms import OpenAI
from langchain.chains import ChatVectorDBChain
def ingest(url):
# load data
loader = WebBaseLoader(url)
data = loader.load()
# split data into chunks
text_splitter = RecursiveCharacterTextSplitter()
documents = text_splitter.split_documents(data)
# Load Data to vectorstore
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.from_documents(documents, embeddings)
return vectorstore
def get_chain(vectorstore):
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
You can assume the question about the URL shared.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
template = """You are an AI assistant for answering questions for the job post and advice users to assess job based on the description.
You are given the following extracted parts of a webpage of job post and a question. Provide a conversational answer.
If you don't know the answer, just say "Hmm, I'm not sure." Don't try to make up an answer.
Question: {question}
=========
{context}
=========
Answer in Markdown:"""
QA_PROMPT = PromptTemplate(
template=template, input_variables=["question", "context"]
)
llm = OpenAI(temperature=0)
qa_chain = ChatVectorDBChain.from_llm(
llm,
vectorstore,
qa_prompt=QA_PROMPT,
condense_question_prompt=CONDENSE_QUESTION_PROMPT,
)
return qa_chain
if __name__ == "__main__":
url = "https://careers.deloitte.ca/job/Toronto%2C-Ontario%2C-Canada-Lead-Data-Engineer-%28Manager%29%2C-Deloitte-Global-Technology%2C-GS-Technology-Solutions-%28Business%29-ON/975737500/"
vectorstore = ingest(url)
qa_chain = get_chain(vectorstore)
chat_history = []
print("Welcome to the AI JobPost assistant!")
while True:
print("Human:")
question = input()
result = qa_chain({"question": question, "chat_history": chat_history})
chat_history.append((question, result["answer"]))
print("AI:")
print(result["answer"])
| [
"You are an AI assistant for answering questions for the job post and advice users to assess job based on the description.\n You are given the following extracted parts of a webpage of job post and a question. Provide a conversational answer.\n If you don't know the answer, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n Question: {question}\n =========\n {context}\n =========\n Answer in Markdown:",
"question",
"t know the answer, just say \"Hmm, I",
"context",
"Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n You can assume the question about the URL shared.\n\n Chat History:\n {chat_history}\n Follow Up Input: {question}\n Standalone question:"
] |
2024-01-10 | bhargavkakadiya/llm-app | app_pdf.py | import sys
from langchain.llms import OpenAI
from langchain.document_loaders import UnstructuredPDFLoader
from langchain.chains.summarize import load_summarize_chain
# Load the document
loader = UnstructuredPDFLoader(str(sys.argv[1]))
data = loader.load()
llm = OpenAI(temperature=0)
chain = load_summarize_chain(llm, chain_type="stuff") # "refine" or "map_reduce"
result = chain.run(data)
print(result)
| [] |
2024-01-10 | joshmlove/pdfReaderAI | pdfReaderAI.py | import openai
import pdfplumber
import constants
# Load the OpenAI API key
openai.api_key = constants.APIKEY
# Read your own data from the PDF file
with pdfplumber.open('Josh_Love_Resume copy.pdf') as pdf:
data = ' '.join(page.extract_text() for page in pdf.pages)
# Function to use the OpenAI API to answer queries about your data
def query_data(query):
response = openai.Completion.create(
engine="text-davinci-002",
prompt=f"{data}\n\n{query}",
temperature=0.5,
max_tokens=100
)
# Extract the generated text and print it
answer = response.choices[0].text.strip()
print(f"Query: {query}\nAnswer: {answer}")
# Now you can query your data like this:
query = input("Enter your query: ")
query_data(query) | [
"PLACEHOLDER\n\nPLACEHOLDER"
] |
2024-01-10 | ankitrana2709/CS50 | chat~chatter.py | import openai
import os
# Set up the OpenAI API key
openai.api_key = "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"
# Set up the initial conversation prompt
conversation_prompt = "Hello, I'm a chatbot. Which article you want today?"
# Set up the API parameters
model_engine = "davinci"
max_tokens = 150
# Start the conversation loop
while True:
# Get the user's message
article = "Write an article about "
user_message = input("You: ")
# Set up the prompt for the API request
prompt = f"{conversation_prompt}\n\nUser: {article + user_message}\nBot:"
# Generate the bot's response using the API
response = openai.Completion.create(
engine=model_engine,
prompt=prompt,
max_tokens=max_tokens
)
# Get the bot's response from the API response
bot_message = response.choices[0].text.strip()
# Print the bot's response
print("Bot:", bot_message)
| [
"Hello, I'm a chatbot. Which article you want today?\n\nUser: Write an article about PLACEHOLDER\nBot:",
"Hello, I'm a chatbot. Which article you want today?",
"conversation_prompt12842c18-8928-44a0-8550-527da9fe5a43\n\nUser: Write an article about PLACEHOLDER\nBot:"
] |
2024-01-10 | msuliot/open_ai_fine_tuning | full_automatic.py | import requests
import time
import openai
import datetime
import json
import sys
# get keys from .env file
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
def validate_file(filename):
try:
with open(filename, 'r') as file:
lines = file.readlines()
the_file = [json.loads(line) for line in lines]
return True
except Exception as e:
print("Error reading file, invalid format: ", e)
return False
def upload_file():
ft_file = openai.File.create(file=open("data.jsonl", "rb"), purpose='fine-tune')
return ft_file["id"]
def create_model(training_file_id):
ft_job = openai.FineTuningJob.create(training_file=training_file_id, model="gpt-3.5-turbo-0613")
return ft_job["id"]
def test_model(model_id, prompt):
completion = openai.ChatCompletion.create(
model=model_id,
temperature=0.0,
messages=[
{"role": "system", "content": "You are a helpful and professional customer service representative"},
{"role": "user", "content": prompt},
]
)
print(completion.choices[0].message)
##############################
######### Main Logic #########
##############################
def main():
# Validate the file
is_file_valid = validate_file("data.jsonl")
if is_file_valid == False:
print("File is not valid")
sys.exit()
print("\nFile is valid and now uploading...\n")
# Upload the file and wait for it to be processed
file_id = upload_file()
sleep_time_file = 5
looptime = 0
while True:
print(f"Waiting for OpenAI to process the file... {looptime}")
file_status = openai.File.retrieve(file_id)
if file_status["status"] == "processed":
print(f"\nFile processed: {file_status['id']}\n")
break
looptime += sleep_time_file
time.sleep(sleep_time_file)
# Create the finetuned model and wait for it to be processed
fine_tuning_job = create_model(file_id)
model_id = ""
looptime = 0
sleep_time_model = 30
while True:
print(f"Waiting for OpenAI to create the model... {looptime}")
model_status = openai.FineTuningJob.retrieve(fine_tuning_job)
if model_status["status"] == "succeeded":
model_id = model_status["fine_tuned_model"]
break
looptime += sleep_time_model
time.sleep(sleep_time_model)
print(f"\nModel created: {model_id}")
# Test the model
print("\nTesting the new OpenAI model\n")
prompt = "Where do I mail my check?"
print(f"Prompt: {prompt}")
test_model(model_id, prompt)
if __name__ == "__main__":
main() | [
"Where do I mail my check?",
"You are a helpful and professional customer service representative"
] |
2024-01-10 | msuliot/open_ai_fine_tuning | step2_upload_file.py | import openai
# get keys from .env file
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
def main():
ft_file = openai.File.create(file=open("data.jsonl", "rb"), purpose='fine-tune')
print(ft_file)
print("Here is the training file id you need for Step 4 ==> ", ft_file["id"])
if __name__ == "__main__":
main()
| [] |
2024-01-10 | msuliot/open_ai_fine_tuning | step5_model_validation.py | import openai
import datetime
# get keys from .env file
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
def pretty_table(f):
print(f"\n{'ID':<33} {'Created At':<22} {'Finished At':<22} {'Status':<13} {'Fine Tuned Model'} ")
print('-' * 140)
for job in f['data']:
created_at = datetime.datetime.fromtimestamp(job['created_at']).strftime('%Y-%m-%d %H:%M:%S')
finished_at = ""
if job['finished_at']:
finished_at = datetime.datetime.fromtimestamp(job['finished_at']).strftime('%Y-%m-%d %H:%M:%S')
print(f"{job['id']:<33} {created_at:<22} {finished_at:<22} {job['status']:<13} {job['fine_tuned_model']} ")
def main():
job_list = openai.FineTuningJob.list(limit=25)
# print(job_list)
pretty_table(job_list)
if __name__ == "__main__":
main() | [] |
2024-01-10 | msuliot/open_ai_fine_tuning | cleanup.py | import openai
# get keys from .env file
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
def delete_file(file_id):
try:
openai.File.delete(file_id)
print("File deleted successfully")
except Exception as e:
print("Error deleting file: ", e)
def delete_finetune_model(model_id):
try:
openai.Model.delete(model_id)
print("Model has been deleted successfully")
except Exception as e:
print("Error deleting model: ", e)
def download_file(file_id, filename="downloaded.jsonl"):
try:
# Download the file
the_file = openai.File.download(file_id)
data_str = the_file.decode('utf-8')
with open(filename, 'w') as file:
file.write(data_str)
print("File downloaded successfully")
except Exception as e:
print("Error downloading file: ", e)
def delete_all_files():
file_list = openai.File.list()
for file in file_list['data']:
print(file['id'], file['purpose'], file['status'])
delete_file(file['id'])
def delete_all_models():
model_list = openai.FineTuningJob.list(limit=50)
for model in model_list['data']:
print(model['status'], model['fine_tuned_model'])
delete_finetune_model(model['fine_tuned_model'])
def list_files():
print("\n===== File List =====")
file_list = openai.File.list()
for file in file_list['data']:
print(file['id'], file['purpose'], file['status'])
def list_models():
print("\n===== Model List =====")
model_list = openai.FineTuningJob.list(limit=50)
for model in model_list['data']:
print(model['status'], model['fine_tuned_model'])
# delete_file("file-5tZ09GT4pGuTAYuBmRHjYgGO")
# delete_all_files()
#
# delete_finetune_model("ft:gpt-3.5-turbo-0613:michael-ai::7rugXpfD")
# delete_all_models()
#
# download_file("file-O4IZuDzXVaPvE5XUPdZpwKJg","down.jsonl")
list_files()
list_models() | [] |
2024-01-10 | msuliot/open_ai_fine_tuning | step3_file_validation.py | import openai
import datetime
# get keys from .env file
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
def pretty_table(f):
print(f"\n{'ID':<33} {'Purpose':<20} {'Status':<12} {'Created At'}")
print('-' * 88)
for file in f['data']:
created_at = datetime.datetime.fromtimestamp(file['created_at']).strftime('%Y-%m-%d %H:%M:%S')
print(f"{file['id']:<33} {file['purpose']:<20} {file['status']:<12} {created_at}")
def main():
file_list = openai.File.list(limit=25)
# print(file_list)
pretty_table(file_list)
if __name__ == "__main__":
main() | [] |
2024-01-10 | msuliot/open_ai_fine_tuning | step4_create_finetuned_model.py | import openai
# get keys from .env file
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
def main():
##### You will need to replace the TRAINING_FILE_ID with the one you got from the previous step.
ft_job = openai.FineTuningJob.create(training_file="TRAINING_FILE_ID", model="gpt-3.5-turbo-0613")
print(ft_job)
if __name__ == "__main__":
main()
| [] |