date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | SBrandeis/transformers | src~transformers~models~openai~modeling_tf_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 OpenAI GPT model."""
from dataclasses import dataclass
from typing import Optional, Tuple
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput, TFSequenceClassifierOutput
from ...modeling_tf_utils import (
TFCausalLanguageModelingLoss,
TFConv1D,
TFPreTrainedModel,
TFSequenceClassificationLoss,
TFSequenceSummary,
TFSharedEmbeddings,
get_initializer,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_openai import OpenAIGPTConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "OpenAIGPTConfig"
_TOKENIZER_FOR_DOC = "OpenAIGPTTokenizer"
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"openai-gpt",
# See all OpenAI GPT models at https://huggingface.co/models?filter=openai-gpt
]
class TFAttention(tf.keras.layers.Layer):
def __init__(self, nx, n_ctx, config, scale=False, **kwargs):
super().__init__(**kwargs)
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert (
n_state % config.n_head == 0
), f"Hidden dimension {n_state} not dividable by number of heads {config.n_head}"
self.n_ctx = n_ctx
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.output_attentions = config.output_attentions
self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name="c_attn")
self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_proj")
self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop)
self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
pass
@staticmethod
def causal_attention_mask(nd, ns, dtype):
"""
1's in the lower triangle, counting from the lower right corner. Same as tf.matrix_band_part(tf.ones([nd, ns]),
-1, ns-nd), but doesn't produce garbage on TPUs.
"""
i = tf.range(nd)[:, None]
j = tf.range(ns)
m = i >= j - ns + nd
return tf.cast(m, dtype)
def _attn(self, q, k, v, attention_mask, head_mask, output_attentions, training=False):
# q, k, v have shape [batch, heads, sequence, features]
w = tf.matmul(q, k, transpose_b=True)
if self.scale:
dk = tf.cast(shape_list(k)[-1], tf.float32) # scale attention_scores
w = w / tf.math.sqrt(dk)
# w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.
_, _, nd, ns = shape_list(w)
b = self.causal_attention_mask(nd, ns, dtype=w.dtype)
b = tf.reshape(b, [1, 1, nd, ns])
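# Scores at allowed positions are kept, while masked (future) positions are pushed down by ~1e4
# so that they contribute roughly zero probability after the softmax below.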
w = w * b - 1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = tf.nn.softmax(w, axis=-1)
w = self.attn_dropout(w, training=training)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [tf.matmul(w, v)]
if output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = tf.transpose(x, [0, 2, 1, 3])
x_shape = shape_list(x)
new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]]
return tf.reshape(x, new_x_shape)
def split_heads(self, x):
x_shape = shape_list(x)
new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head]
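# reshape the last dimension: [batch, seq_length, n_embd] -> [batch, seq_length, n_head, n_embd // n_head]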
x = tf.reshape(x, new_x_shape)
return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
def call(self, x, attention_mask, head_mask, output_attentions, training=False):
x = self.c_attn(x)
query, key, value = tf.split(x, 3, axis=2)
query = self.split_heads(query)
key = self.split_heads(key)
value = self.split_heads(value)
attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions, training=training)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a, training=training)
outputs = [a] + attn_outputs[1:]
return outputs # a, (attentions)
class TFMLP(tf.keras.layers.Layer):
def __init__(self, n_state, config, **kwargs):
super().__init__(**kwargs)
nx = config.n_embd
self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_fc")
self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name="c_proj")
self.act = get_tf_activation("gelu")
self.dropout = tf.keras.layers.Dropout(config.resid_pdrop)
def call(self, x, training=False):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
h2 = self.dropout(h2, training=training)
return h2
class TFBlock(tf.keras.layers.Layer):
def __init__(self, n_ctx, config, scale=False, **kwargs):
super().__init__(**kwargs)
nx = config.n_embd
self.attn = TFAttention(nx, n_ctx, config, scale, name="attn")
self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1")
self.mlp = TFMLP(4 * nx, config, name="mlp")
self.ln_2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_2")
def call(self, x, attention_mask, head_mask, output_attentions, training=False):
output_attn = self.attn(x, attention_mask, head_mask, output_attentions, training=training)
a = output_attn[0] # output_attn: a, (attentions)
n = self.ln_1(x + a)
m = self.mlp(n, training=training)
h = self.ln_2(n + m)
outputs = [h] + output_attn[1:]
return outputs # x, (attentions)
@keras_serializable
class TFOpenAIGPTMainLayer(tf.keras.layers.Layer):
config_class = OpenAIGPTConfig
def __init__(self, config, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
self.config = config
self.output_hidden_states = config.output_hidden_states
self.output_attentions = config.output_attentions
self.return_dict = config.use_return_dict
self.num_hidden_layers = config.n_layer
self.vocab_size = config.vocab_size
self.n_embd = config.n_embd
self.tokens_embed = TFSharedEmbeddings(
config.vocab_size, config.n_embd, initializer_range=config.initializer_range, name="tokens_embed"
)
self.positions_embed = tf.keras.layers.Embedding(
config.n_positions,
config.n_embd,
embeddings_initializer=get_initializer(config.initializer_range),
name="positions_embed",
)
self.drop = tf.keras.layers.Dropout(config.embd_pdrop)
self.h = [TFBlock(config.n_ctx, config, scale=True, name="h_._{}".format(i)) for i in range(config.n_layer)]
def get_input_embeddings(self):
return self.tokens_embed
def set_input_embeddings(self, value):
self.tokens_embed.weight = value
self.tokens_embed.vocab_size = shape_list(value)[0]
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
raise NotImplementedError
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
inputs["input_ids"] = tf.reshape(inputs["input_ids"], [-1, input_shape[-1]])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs["position_ids"] is None:
inputs["position_ids"] = tf.range(input_shape[-1], dtype=tf.int32)[tf.newaxis, :]
if inputs["attention_mask"] is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
inputs["attention_mask"] = inputs["attention_mask"][:, tf.newaxis, tf.newaxis, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
inputs["attention_mask"] = tf.cast(inputs["attention_mask"], tf.float32)
inputs["attention_mask"] = (1.0 - inputs["attention_mask"]) * -10000.0
else:
inputs["attention_mask"] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if inputs["head_mask"] is not None:
raise NotImplementedError
else:
inputs["head_mask"] = [None] * self.num_hidden_layers
# head_mask = tf.constant([0] * self.num_hidden_layers)
inputs["position_ids"] = tf.reshape(inputs["position_ids"], [-1, shape_list(inputs["position_ids"])[-1]])
if inputs["inputs_embeds"] is None:
inputs["inputs_embeds"] = self.tokens_embed(inputs["input_ids"], mode="embedding")
position_embeds = self.positions_embed(inputs["position_ids"])
if inputs["token_type_ids"] is not None:
inputs["token_type_ids"] = tf.reshape(
inputs["token_type_ids"], [-1, shape_list(inputs["token_type_ids"])[-1]]
)
token_type_embeds = self.tokens_embed(inputs["token_type_ids"], mode="embedding")
else:
token_type_embeds = 0
hidden_states = inputs["inputs_embeds"] + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states, training=inputs["training"])
output_shape = input_shape + [shape_list(hidden_states)[-1]]
all_attentions = () if inputs["output_attentions"] else None
all_hidden_states = () if inputs["output_hidden_states"] else None
for i, block in enumerate(self.h):
if inputs["output_hidden_states"]:
all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
outputs = block(
hidden_states,
inputs["attention_mask"],
inputs["head_mask"][i],
inputs["output_attentions"],
training=inputs["training"],
)
hidden_states = outputs[0]
if inputs["output_attentions"]:
all_attentions = all_attentions + (outputs[1],)
hidden_states = tf.reshape(hidden_states, output_shape)
# Add last hidden state
if inputs["output_hidden_states"]:
all_hidden_states = all_hidden_states + (hidden_states,)
if inputs["output_attentions"]:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
if not inputs["return_dict"]:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
)
class TFOpenAIGPTPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = OpenAIGPTConfig
base_model_prefix = "transformer"
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
@dataclass
class TFOpenAIGPTDoubleHeadsModelOutput(ModelOutput):
"""
Base class for outputs of models predicting if two sentences are consecutive or not.
Args:
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mc_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices)`):
Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
logits: tf.Tensor = None
mc_logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
OPENAI_GPT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the
generic methods the library implements for all its models (such as downloading or saving, resizing the input
embeddings, pruning heads etc.)
This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use
it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage
and behavior.
.. note::
TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional argument.
This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all
the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in
the first positional argument :
- a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Parameters:
config (:class:`~transformers.OpenAIGPTConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
OPENAI_GPT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.OpenAIGPTTokenizer`. See
:func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`__
position_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`__
head_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.",
OPENAI_GPT_START_DOCSTRING,
)
class TFOpenAIGPTModel(TFOpenAIGPTPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
@add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="openai-gpt",
output_type=TFBaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.transformer(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
# Copied from transformers.models.distilbert.modeling_tf_distilbert.TFDistilBertModel.serving_output
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input
embeddings).
""",
OPENAI_GPT_START_DOCSTRING,
)
class TFOpenAIGPTLMHeadModel(TFOpenAIGPTPreTrainedModel, TFCausalLanguageModelingLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
def get_output_embeddings(self):
return self.get_input_embeddings()
def set_output_embeddings(self, value):
self.set_input_embeddings(value)
@add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="openai-gpt",
output_type=TFCausalLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the cross entropy classification loss. Indices should be in ``[0, ...,
config.vocab_size - 1]``.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
transformer_outputs = self.transformer(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
hidden_states = transformer_outputs[0]
logits = self.transformer.tokens_embed(hidden_states, mode="linear")
loss = None
if inputs["labels"] is not None:
# shift labels to the left and cut last logit token
logits = logits[:, :-1]
labels = inputs["labels"][:, 1:]
loss = self.compute_loss(labels, logits)
if not inputs["return_dict"]:
output = (logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFCausalLMOutput(
loss=loss,
logits=logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.serving_output
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFCausalLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top e.g. for
RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
input embeddings, the classification head takes as input the input of a specified classification token index in the
input sequence).
""",
OPENAI_GPT_START_DOCSTRING,
)
class TFOpenAIGPTDoubleHeadsModel(TFOpenAIGPTPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
config.num_labels = 1
self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
self.multiple_choice_head = TFSequenceSummary(
config, initializer_range=config.initializer_range, name="multiple_choice_head"
)
@add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFOpenAIGPTDoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
mc_token_ids=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
r"""
mc_token_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, num_choices)`, `optional`, default to index of the last token of the input):
Index of the classification token in each input sequence. Selected in the range ``[0, input_ids.size(-1) -
1]``.
Return:
Examples::
>>> import tensorflow as tf
>>> from transformers import OpenAIGPTTokenizer, TFOpenAIGPTDoubleHeadsModel
>>> tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
>>> model = TFOpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt')
>>> # Add a [CLS] to the vocabulary (we should train it also!)
>>> tokenizer.add_special_tokens({'cls_token': '[CLS]'})
>>> model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
>>> print(tokenizer.cls_token_id, len(tokenizer)) # The newly added token is the last token of the vocabulary
>>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
>>> encoding = tokenizer(choices, return_tensors="tf")
>>> inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()}
>>> inputs["mc_token_ids"]= tf.constant([inputs["input_ids"].shape[-1] - 1, inputs["input_ids"].shape[-1] - 1])[None, :] # Batch size 1
>>> outputs = model(inputs)
>>> lm_prediction_scores, mc_prediction_scores = outputs[:2]
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
mc_token_ids=mc_token_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None:
input_shapes = shape_list(inputs["input_ids"])
else:
input_shapes = shape_list(inputs["inputs_embeds"])[:-1]
seq_length = input_shapes[-1]
flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None
flat_attention_mask = (
tf.reshape(inputs["attention_mask"], (-1, seq_length)) if inputs["attention_mask"] is not None else None
)
flat_token_type_ids = (
tf.reshape(inputs["token_type_ids"], (-1, seq_length)) if inputs["token_type_ids"] is not None else None
)
flat_position_ids = (
tf.reshape(inputs["position_ids"], (-1, seq_length)) if inputs["position_ids"] is not None else None
)
transformer_outputs = self.transformer(
flat_input_ids,
flat_attention_mask,
flat_token_type_ids,
flat_position_ids,
inputs["head_mask"],
inputs["inputs_embeds"],
inputs["output_attentions"],
inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
hidden_states = transformer_outputs[0]
hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:])
lm_logits = self.transformer.tokens_embed(hidden_states, mode="linear")
mc_logits = self.multiple_choice_head(hidden_states, inputs["mc_token_ids"], training=inputs["training"])
mc_logits = tf.squeeze(mc_logits, axis=-1)
if not inputs["return_dict"]:
return (lm_logits, mc_logits) + transformer_outputs[1:]
return TFOpenAIGPTDoubleHeadsModelOutput(
logits=lm_logits,
mc_logits=mc_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
"mc_token_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFOpenAIGPTDoubleHeadsModelOutput(
logits=output.logits, mc_logits=output.mc_logits, hidden_states=hs, attentions=attns
)
@add_start_docstrings(
"""
The OpenAI GPT Model transformer with a sequence classification head on top (linear layer).
:class:`~transformers.TFOpenAIGPTForSequenceClassification` uses the last token in order to do the classification,
as other causal models (e.g. GPT-2) do.
Since it does classification on the last token, it requires to know the position of the last token. If a
:obj:`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each
row. If no :obj:`pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot
guess the padding tokens when :obj:`inputs_embeds` are passed instead of :obj:`input_ids`, it does the same (take
the last value in each row of the batch).
""",
OPENAI_GPT_START_DOCSTRING,
)
class TFOpenAIGPTForSequenceClassification(TFOpenAIGPTPreTrainedModel, TFSequenceClassificationLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.score = tf.keras.layers.Dense(
config.num_labels,
kernel_initializer=get_initializer(config.initializer_range),
name="score",
use_bias=False,
)
self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
@add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="openai-gpt",
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the cross entropy classification loss. Indices should be in ``[0, ...,
config.vocab_size - 1]``.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
transformer_outputs = self.transformer(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
logits_shape = shape_list(logits)
in_logits = None
if self.config.pad_token_id is None:
sequence_lengths = -1
else:
if inputs["input_ids"] is not None:
sequence_lengths = (
tf.reduce_sum(
tf.cast(tf.math.not_equal(inputs["input_ids"], self.config.pad_token_id), tf.int32),
-1,
keepdims=False,
)
- 1
)
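# sequence_lengths[b] is now the index of the last non-padding token in row b of the batch.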
def get_seq_element(sequence_position, input_batch):
return tf.strided_slice(
input_batch, [sequence_position, 0], [sequence_position + 1, input_batch.shape[-1]], [1, 1]
)
result = tf.map_fn(
fn=lambda t: get_seq_element(t[0], t[1]), elems=[sequence_lengths, logits], dtype="float"
)
in_logits = tf.reshape(result, [logits_shape[0], logits_shape[-1]])
else:
sequence_lengths = -1
logger.warning(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
f"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
loss = None
if inputs["labels"] is not None:
if input_ids is not None:
batch_size, sequence_length = shape_list(inputs["input_ids"])[:2]
else:
batch_size, sequence_length = shape_list(inputs["inputs_embeds"])[:2]
assert (
self.config.pad_token_id is not None or batch_size == 1
), "Cannot handle batch sizes > 1 if no padding token is defined."
if not tf.is_tensor(sequence_lengths):
in_logits = logits[0:batch_size, sequence_lengths]
loss = self.compute_loss(
tf.reshape(inputs["labels"], [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels])
)
pooled_logits = in_logits if in_logits is not None else logits
if not inputs["return_dict"]:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=pooled_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
| [] |
2024-01-10 | Lucete28/TradeTrend | TT_runfile~update_naver_raw.py | from airflow.models.variable import Variable
import openai
import pandas as pd
openai.api_key = Variable.get("gpt_api_key")
Target_list = Variable.get("Target_list")
values = [tuple(item.strip("()").split(",")) for item in Target_list.split("),")]
values = [(x[0].strip(), x[1].strip()) for x in values]
err_report = []
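# For each (stock code, stock name) pair, re-score headlines whose stored sentiment value is missing,
# malformed, or outside the [0, 1] range by asking the chat model again, then write the fixes back to the raw CSV.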
for val in values:
gpt_ans = []
temp_df = pd.read_csv(f'/home/jhy/code/TradeTrend/data/{val[0]}/{val[0]}_temp4.csv')
raw_df = pd.read_csv(f'/home/jhy/code/TradeTrend/data/{val[0]}/{val[0]}_news_raw2.csv')
ans_list = raw_df.iloc[:, 1]
while True:
condition_satisfied = True # flag tracking whether every row already satisfies the conditions
for i, ans in enumerate(ans_list):
try:
if len(str(ans)) > 4 or (float(ans) > 1 or float(ans) < 0):
messages = []
a = temp_df.iloc[i, 1]
content = f'{a} {val[1]} 관련 뉴스기사 제목인데 {val[1]} 주식에 미칠 긍정도의 평균을 0에서 1사이 소숫점 두자리까지 나타내 float값만'
messages.append({"role": "user", "content": content})
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
chat_response = completion.choices[0].message.content
gpt_ans.append(chat_response)
messages.append({"role": "assistant", "content": chat_response})
# Update the corresponding value in raw_df with the new score.
raw_df.iloc[i, 1] = chat_response
raw_df.to_csv(f'/home/jhy/code/TradeTrend/data/{val[0]}/{val[0]}_news_raw2.csv', index=False)
condition_satisfied = False # mark that at least one row did not yet satisfy the condition
except: # an error occurred
print(i, ans)
err_report.append(ans)
condition_satisfied = False
if condition_satisfied:
break # exit the loop once every row satisfies the condition
for err in err_report:
if err_report.count(err) >=5:
print("5회 이상 같은 err 발생")
break
| [
"PLACEHOLDER PLACEHOLDER 관련 뉴스기사 제목인데 PLACEHOLDER 주식에 미칠 긍정도의 평균을 0에서 1사이 소숫점 두자리까지 나타내 float값만"
] |
2024-01-10 | LilithHafner/ai | integrated_ai.py | import openai
openai.api_key = "sk-..."
# GPT AI
def ai(prompt):
response = openai.Completion.create(
engine="code-davinci-002",
prompt=prompt,
temperature=0,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop="<end>"
)
return response.choices[0].text
# Subprocesses
def user(prompt):
return input(prompt+"\n*>> ")
import traceback
def python_eval(prompt):
try:
return str(eval(prompt, globals()))
except:
return traceback.format_exc()
def python_exec(prompt):
try:
return str(exec(prompt, globals()))
except:
return traceback.format_exc()
subprocesses = [
("<user output>", "<user input>", user),
("<python eval>", "<python eval result>", python_eval),
("<python exec>", "<python exec result>", python_exec),
]
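# Each entry is (tag the model emits, tag prefixed to the tool's result, handler that runs the request);
# subprocess() below uses this table to route a model "call" such as "<python eval>1+1" to the matching tool.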
def subprocess(s):
for start, end, func in subprocesses:
if s.startswith(start):
return end + func(s[len(start):])
# print("The AI made an unsupported query:", s, "", sep="\n")
return "<error>unknown tag"
## Training data
prompt = """This is a question and answer bot that has oracles to various external tools including python, google, and others
<user input>what time is it<end>
<pyhton eval>time.ctime()<end>
<python eval result>Traceback (most recent call last):
File "/Users/x/Documents/integrated_ai.py", line 26, in python
return str(eval(prompt, globals(), locals()))
File "<string>", line 1, in <module>
NameError: name 'time' is not defined<end>
<python exec>import time<end>
<python exec result>None<end>
<python eval>time.ctime()<end>
<user output>The time is Sun Apr 24 18:01:32 2022<end>
<user input>what is the weather in New York<end>
<google>weather in New York<end>
<google result>Sunny
53°F°C
Precipitation: 1%
Humidity: 52%
Wind: 7 mph
New York, NY
Sunday 6:00 PM
Sunny
TemperaturePrecipitationWind<end>
<user output>The weather in New York is Sunny<end>
<user input>is it warm in chicago?<end>
<google>weather in chicago<end>
result: Cloudy
70°F°C
Precipitation: 5%
Humidity: 65%
Wind: 19 mph
Chicago, IL
Sunday 6:00 PM
Cloudy
TemperaturePrecipitationWind<end>
<user output>It is warm in chicago<end>
<user input>is 1729 prime?<end>
<python eval>is_prime(1729)<end>
<python eval result>Traceback (most recent call last):
File "/Users/x/Documents/integrated_ai.py", line 26, in python_eval
return str(eval(prompt, globals()))
File "<string>", line 1, in <module>
NameError: name 'is_prime' is not defined<end>
<python exec>def is_prime(n):
if n <= 1:
return False
for i in range(2, n):
if n % i == 0:
return False
return True<end>
<python exec result>None<end>
<python eval>is_prime(1729)<end>
<python eval result>False<end>
<user output>1729 is not prime<end>
<user input>Stop using google<end>
<user output>Google disabled.<end>
<user input>What's the weather?<end>
<user output>I cannot answer that question without google<end>
<user input>Name 7 edibe mushrooms<end>
<user output>Pleurotus, Lentinula edodes, Shiitake mushroom, Auricularia auricula-judae, Volvariella volvacea, Flammulina velutipes, Tremella fuciformis<end>"""
# Main loop
def kernal(verbose=True):
global prompt
prompt += "<user input>" + user("Welcome!") + "<end>\n"
while True:
call = ai(prompt)
if verbose:
print(call + "<end>")
prompt += call + "<end>\n"
if call.startswith("<exit>"):
return
result = subprocess(call)
if verbose:
print(result + "<end>")
prompt += result + "<end>\n"
if __name__ == "__main__":
kernal()
| [
"This is a question and answer bot that has oracles to various external tools including python, google, and others\n\n<user input>what time is it<end>\n<pyhton eval>time.ctime()<end>\n<python eval result>Traceback (most recent call last):\n File \"/Users/x/Documents/integrated_ai.py\", line 26, in python\n return str(eval(prompt, globals(), locals()))\n File \"<string>\", line 1, in <module>\nNameError: name 'time' is not defined<end>\n<python exec>import time<end>\n<python exec result>None<end>\n<python eval>time.ctime()<end>\n<user output>The time is Sun Apr 24 18:01:32 2022<end>\n<user input>what is the weather in New York<end>\n<google>weather in New York<end>\n<google result>Sunny\n53°F°C\nPrecipitation: 1%\nHumidity: 52%\nWind: 7 mph\nNew York, NY\nSunday 6:00 PM\nSunny\nTemperaturePrecipitationWind<end>\n<user output>The weather in New York is Sunny<end>\n<user input>is it warm in chicago?<end>\n<google>weather in chicago<end>\nresult: Cloudy\n70°F°C\nPrecipitation: 5%\nHumidity: 65%\nWind: 19 mph\nChicago, IL\nSunday 6:00 PM\nCloudy\nTemperaturePrecipitationWind<end>\n<user output>It is warm in chicago<end>\n<user input>is 1729 prime?<end>\n<python eval>is_prime(1729)<end>\n<python eval result>Traceback (most recent call last):\n File \"/Users/x/Documents/integrated_ai.py\", line 26, in python_eval\n return str(eval(prompt, globals()))\n File \"<string>\", line 1, in <module>\nNameError: name 'is_prime' is not defined<end>\n<python exec>def is_prime(n):\n if n <= 1:\n return False\n for i in range(2, n):\n if n % i == 0:\n return False\n return True<end>\n<python exec result>None<end>\n<python eval>is_prime(1729)<end>\n<python eval result>False<end>\n<user output>1729 is not prime<end>\n<user input>Stop using google<end>\n<user output>Google disabled.<end>\n<user input>What's the weather?<end>\n<user output>I cannot answer that question without google<end>\n<user input>Name 7 edibe mushrooms<end>\n<user output>Pleurotus, Lentinula edodes, Shiitake mushroom, Auricularia auricula-judae, Volvariella volvacea, Flammulina velutipes, Tremella fuciformis<end>",
"<end>\n",
"<user input>",
"PLACEHOLDER<end>\n"
] |
2024-01-10 | Kororinpas/Lit_Tool | document_util.py | def get_split_documents(docs, chunk_size, chunk_overlap):
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size,chunk_overlap=chunk_overlap)
return text_splitter.split_documents(docs) | [] |
2024-01-10 | Kororinpas/Lit_Tool | literature_test.py | import streamlit as st
import sys
class StreamlitWriter:
def write(self, text):
st.write(text.strip())
### This the function about streamlit
def Vector_Databse():
st.write("Vector Database")
choose = st.radio("Choose using an existing database or upload a new one.",
["Using an existing one", "Uploading a new one"])
import torch
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if choose == "Using an existing one":
persist_dirctory = st.text_input("Enter the persist_dirctory")
collection = st.text_input("Enter the collection")
if st.button('Confirm'):
st.session_state['persist_dirctory'] = persist_dirctory
st.session_state['collection'] = collection
vectorstore,embeddings = load_vectorstore(persist_directory=st.session_state['persist_dirctory'],
collection_name = st.session_state['collection'],
model_name = 'sentence-transformers/all-mpnet-base-v2',
device = device)
st.session_state['vectorstore'] = vectorstore
st.session_state['embeddings'] = embeddings
print('The vectorstore load successfully')
else:
path = st.text_input("Enter the path")
persist_dirctory = st.text_input("Enter the persist_dirctory")
collection = st.text_input("Enter the collection")
if st.button('Confirm'):
st.session_state['path'] = path
st.session_state['persist_dirctory'] = persist_dirctory
st.session_state['collection'] = collection
split_docs = load_pdf(path = st.session_state['path'],
openai_api_key=st.session_state['openai_api_key'],
chunk_size=st.session_state['chunk_size'],
chunk_overlap=st.session_state['chunk_overlap'])
vectorstore,embeddings = generate_vectorstore(split_docs = split_docs,
model_name = 'sentence-transformers/all-mpnet-base-v2',
persist_directory = st.session_state['persist_dirctory'],
collection_name = st.session_state['collection'],
device=device)
st.session_state['vectorstore'] = vectorstore
st.session_state['embeddings'] =embeddings
print('The vectorstore load successfully')
def Parameters():
import os
openai_api_key = st.text_input('Enter your OpenAI API key')
if st.button('Confirm'):
if openai_api_key == '':
st.session_state['openai_api_key'] = os.environ.get('openai_api_key')
else:
st.session_state['openai_api_key'] = openai_api_key
chunk_size = st.text_input('Enter your chunk_size')
if st.button('Confirm_1'):
if chunk_size== '':
st.session_state['chunk_size'] = 1500
chunk_overlap = st.text_input('Enter your chunk_overlap')
if st.button('Confirm_2'):
if chunk_overlap == '':
st.session_state['chunk_overlap'] = 0
def Docs():
col1,col2 = st.columns([1,1])
with col1:
output_text = ''
vectorstore = st.session_state['vectorstore']
edited_output_text = st.text_area("输出文本", value=output_text, height=600)
if st.button("Confirm paragraph"):
output_text = edited_output_text
k = st.slider("Select the number of sentences to generate", min_value=1, max_value=5, value=1)
query = st.text_input("Input the query")
if st.button("Confirm query"):
output, docs = get_chain_output(query=query,
vectordb=vectorstore,
k=k,
openai_api_key=st.session_state['openai_api_key'])
final_json = run_text_match(output=output,
query=query,
docs=docs,
k=k,
embeddings=st.session_state['embeddings'])
st.session_state['final_json'] = final_json
with col2:
if 'final_json' in st.session_state:
final_json = st.session_state['final_json']
selected_sentence = st.selectbox("Select a sentence", final_json)
if st.button('Confirm sentence'):
process_selected_sentence(selected_sentence)
###This is the function about Langchain
###Loading PDF part
def load_pdf(path, openai_api_key, chunk_size, chunk_overlap):
from langchain.document_loaders import PyMuPDFLoader, DirectoryLoader, UnstructuredPDFLoader
#from detectron2.config import get_cfg
from PyPDF2 import PdfReader
#cfg = get_cfg()
#cfg.MODEL.DEVICE = 'gpu'
import os
file_names = os.listdir(path)
pdf_file_names = [path + '/' + file_name for file_name in file_names if file_name.endswith('.pdf')]
docs = []
import re
for pdf in pdf_file_names:
source = extract_doi(pdf)
if source != 'None':
doc = PyMuPDFLoader(pdf).load()
for element in doc:
element.metadata = source
element.page_content = re.sub('\n+', ' ', element.page_content.strip())
docs.append(element)
else:
doc = PyMuPDFLoader(pdf).load()
print(f"{pdf} is not identified! Using other strategy!!")
source = extract_doi_llm(doc, openai_api_key)
if source != 'None':
for element in doc:
element.metadata = source
for element in doc:
element.page_content = re.sub('\n+', ' ', element.page_content.strip())
docs.append(element)
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
split_docs = text_splitter.split_documents(docs)
return split_docs
def get_info(path):
from PyPDF2 import PdfReader
with open(path, 'rb') as f:
pdf = PdfReader(f)
info = pdf.metadata
return info
def extract_doi(path):
source = 0
info = get_info(path)
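# Look for the DOI in the PDF metadata: first '/doi', then a 'doi:' marker inside '/Subject', then '/WPS-ARTICLEDOI'.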
if '/doi' in info:
doi = info['/doi']
elif '/Subject' in info:
Subject = info['/Subject']
if 'doi:' in Subject:
Subject = Subject.split('doi:')
doi = Subject[1]
else:
source = 'None'
elif '/WPS-ARTICLEDOI' in info:
doi = info['/WPS-ARTICLEDOI']
else:
source = 'None'
if source != 'None':
import habanero
import time
citation = habanero.cn.content_negotiation(ids=doi, format='bibentry')
time.sleep(5)
import bibtexparser
citation = bibtexparser.loads(citation)
citation = citation.entries[0]
source = {'author': citation['author'],
'year': citation['year'],
'title': citation['title'],
'journal': citation['journal'],
}
return source
def extract_doi_llm(doc,openai_api_key):
import re
doc[0].page_content = re.sub('\n+',' ',doc[0].page_content.strip())
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500,chunk_overlap = 50)
split_docs = text_splitter.split_documents(doc)
abstract = split_docs[0]
doi = extract_chain(abstract,openai_api_key)
if doi != 'None' and doi!= None:
import habanero
import time
citation = habanero.cn.content_negotiation(ids = doi,format='bibentry')
time.sleep(5)
import bibtexparser
citation = bibtexparser.loads(citation)
citation = citation.entries[0]
source = {'author':citation['author'],
'year':citation['year'],
'title':citation['title'],
'journal':citation['journal'],
}
return source
else:
source = 'None'
return source
def extract_chain(abstract, openai_api_key):
from kor.extraction import create_extraction_chain
from kor.nodes import Object, Text, Number
from langchain.chat_models import ChatOpenAI
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
openai_api_key=openai_api_key,
temperature=0,
)
schema = Object(
id="doi",
description="doi is a digital identifier.It typically starts with 10. followed by a numeric prefix, such as 10.1000/182.",
attributes=[
Text(
id="doi",
description='doi is a digital identifier. It typically starts with "10." followed by a numeric prefix, such as 10.1000/182.',
examples=[
(
'American Economic Journal: Economic Policy 2015, 7(4): 223–242 http://dx.doi.org/10.1257/pol.20130367 223 Water Pollution Progress at Borders: The',
'http://dx.doi.org/10.1257/pol.20130367'),
(
'Environment and Development Economics (2020), 1–17 doi:10.1017/S1355770X2000025X EDE RESEARCH ARTICLE Political incentives, Party Congress, and pollution cycle: empirical evidence from China Zhihua Tian,1 and Yanfang Tian2* 1School of Economics, Zhejiang University of Technology, Hangzhou',
'10.1017/S1355770X2000025X')
],
many=True
)
],
many=False
)
chain = create_extraction_chain(llm, schema, encoder_or_encoder_class='json')
output = chain.predict_and_parse(text=abstract.page_content)
if 'doi' not in output['data']:
print(f"LLM strategy failed!!{abstract.metadata['source']} Please manually add it!!")
source = 'None'
return source
else:
if output['data']['doi']['doi'] == []:
print(f"LLM strategy failed!!{abstract.metadata['source']} Please manually add it!!")
source = 'None'
return source
else:
doi = output['data']['doi']['doi'][0]
if 'doi=' in doi:
doi = doi.split('doi=')[1]
return doi
###Loading the database
def generate_vectorstore(split_docs, device, model_name, persist_directory, collection_name):
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
model_kwargs = {'device': device}
model_name = model_name
embeddings = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)
persist_directory = persist_directory
collection_name = collection_name
vectorstore = Chroma.from_documents(split_docs, embeddings, collection_name=collection_name,
persist_directory=persist_directory)
vectorstore.persist()
return vectorstore,embeddings
def load_vectorstore(persist_directory,device,model_name,collection_name):
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
model_kwargs = {'device': device}
model_name = model_name
embeddings = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)
vectordb = Chroma(collection_name=collection_name,
persist_directory=persist_directory,
embedding_function=embeddings)
return vectordb,embeddings
###Using Langchain and match
def get_chain_output(query, vectordb, k, openai_api_key):
docs = vectordb.similarity_search(query, 6, include_metadata=True)
from langchain.chat_models import ChatOpenAI
llm = ChatOpenAI(openai_api_key=openai_api_key, temperature=0, model_name="gpt-3.5-turbo")
from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.llms import OpenAI
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field, validator
from typing import List, Union, Optional
class Sentence(BaseModel):
sentence: List[str] = Field(
description="The sentence in the given document which is the most similar to the query provided")
source: List[str] = Field(description="The meta source of the paper")
score: List[float] = Field(
description="The similarity score between the sentence selected and the query provided")
parser = PydanticOutputParser(pydantic_object=Sentence)
dic = {'1':"one",
"2":"two",
"3":"three",
"4":"four",
"5":"five"}
k = dic[str(k)]
question_template = f"""
Given the document and query, find {k} sentences in the document that are most similar in meaning to the query.
Return the sentences, the meta source of the sentences and the cosine similarity scores.
If no similar sentences is found, return the sentence with highest cosine siliarity scores.
"""
main_template = """
{query}
===========
{context}
===========
{format_instructions}
"""
question_template = question_template+main_template
from langchain.chains.question_answering import load_qa_chain
from langchain import LLMChain
PROMPT = PromptTemplate(template=question_template,
input_variables=['query', 'context'],
partial_variables={"format_instructions": parser.get_format_instructions()})
llm_chain = LLMChain(llm=llm, prompt=PROMPT)
output = llm_chain({"query": query, "context": docs})
return output, docs
def run_text_match(output, k,query, docs,embeddings):
import re
text = re.sub("\n+", "", output['text'])
import json
json_obj = json.loads(text)
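# If the model echoed the format instructions (a "properties" key) instead of real matches, fall back to embedding search.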
if "properties" in json_obj:
print('No result was found, Using embedding searching strategy!!!')
split_docs = split_for_embedding(docs)
similar_sentence = search_cosine_similarity(query,k,split_docs, embeddings)
return similar_sentence
else:
json_obj = [{'sentence': json_obj['sentence'][i],
'source': json_obj['source'][i],
'score': json_obj['score'][i]} for i in range(k)]
return json_obj
def split_for_embedding(docs): ## input: docs (list), output: list of sentence dicts ready for embedding
for_embedding = []
for content in docs:
new_content = content.page_content.replace('et al.', 'et al。')
new_content = new_content.split('.')
if 'source' in content.metadata:
meta_data = content.metadata['source']
else:
meta_data = content.metadata
for split_content in new_content:
split_content = split_content.replace('。', '.')
if len(split_content) < 30:
continue
else:
for_embedding.append({"content": split_content, "source": meta_data})
return for_embedding
def search_cosine_similarity(query, k,split_docs, embeddings): ## query: str, split_docs: list, embeddings: embedding model
split_docs_content = [content['content'] for content in split_docs]
split_docs_content = list(set(split_docs_content))
embed_docs = embeddings.embed_documents(split_docs_content)
embed_query = embeddings.embed_query(query)
from openai.embeddings_utils import cosine_similarity
cos_index = []
for embed_doc in embed_docs:
cos_index.append(cosine_similarity(embed_doc, embed_query))
# build an index ordered by similarity score
idx = sorted(range(len(cos_index)), key=lambda k: cos_index[k]) # sort indices by ascending cosine similarity
final_similar_list = []
for index in idx[-k:]:
unit = {}
unit['sentences'] = split_docs_content[index]
unit['source'] = split_docs[index]['source']
unit['score'] = cos_index[index]
final_similar_list.append(unit)
return final_similar_list
def main():
st.title("Literature Review Tool")
sys.stdout = StreamlitWriter()
# Create a toggle button to switch between pages
page = st.sidebar.radio("Choose a page", [ "Parameter","Vector Database","Docs"])
if page == "Parameter":
Parameters()
elif page == "Vector Database":
Vector_Databse()
elif page == "Docs":
Docs()
def my_function(input_text):
# add your processing logic here
output_text = input_text.upper()
return output_text
def process_selected_sentence(selected_sentence):
# show the sentence the user selected in the final output area
st.write(f"You selected: {selected_sentence}")
main() | [
"\n Given the document and query, find PLACEHOLDER sentences in the document that are most similar in meaning to the query. \n Return the sentences, the meta source of the sentences and the cosine similarity scores. \n If no similar sentences is found, return the sentence with highest cosine siliarity scores.\n ",
"format_instructions",
"PLACEHOLDERPLACEHOLDER",
"\n {query}\n ===========\n {context}\n ===========\n {format_instructions}\n\n ",
"context"
] |
2024-01-10 | Kororinpas/Lit_Tool | pdf_retrieval.py | from operator import itemgetter
from langchain.chat_models import ChatOpenAI
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.document_loaders import DataFrameLoader, PyMuPDFLoader
import os
import fitz
import pandas as pd
import json
import ast
def fonts(doc, granularity=False, pages=2):
"""Extracts fonts and their usage in PDF documents.
:param doc: PDF document to iterate through
:type doc: <class 'fitz.fitz.Document'>
:param granularity: also use 'font', 'flags' and 'color' to discriminate text
:type granularity: bool
:rtype: [(font_size, count), (font_size, count}], dict
:return: most used fonts sorted by count, font style information
"""
styles = {}
font_counts = {}
pageCounter = 0
for page in doc:
blocks = page.get_text("dict")["blocks"]
for b in blocks: # iterate through the text blocks
if b['type'] == 0: # block contains text
for l in b["lines"]: # iterate through the text lines
for s in l["spans"]: # iterate through the text spans
if granularity:
identifier = "{0}_{1}_{2}_{3}".format(s['size'], s['flags'], s['font'], s['color'])
styles[identifier] = {'size': s['size'], 'flags': s['flags'], 'font': s['font'],
'color': s['color']}
else:
identifier = "{0}".format(s['size'])
styles[identifier] = {'size': s['size'], 'font': s['font']}
font_counts[identifier] = font_counts.get(identifier, 0) + 1 # count the fonts usage
pageCounter += 1
if pageCounter >= pages:
break
font_counts = sorted(font_counts.items(), key=itemgetter(1), reverse=True)
if len(font_counts) < 1:
raise ValueError("Zero discriminating fonts found!")
return font_counts, styles
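# Illustrative shape of the return value (added; the numbers and font name are hypothetical):
#   font_counts -> [('10.0', 312), ('12.0', 18), ('14.0', 2)]   # (size identifier, span count), most frequent first
#   styles      -> {'10.0': {'size': 10.0, 'font': 'Times-Roman'}, ...}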
def font_tags(font_counts, styles):
"""Returns dictionary with font sizes as keys and tags as value.
    :param font_counts: (font_size, count) for all fonts occurring in the document
:type font_counts: list
:param styles: all styles found in the document
:type styles: dict
:rtype: dict
:return: all element tags based on font-sizes
"""
p_style = styles[font_counts[0][0]] # get style for most used font by count (paragraph)
p_size = p_style['size'] # get the paragraph's size
# sorting the font sizes high to low, so that we can append the right integer to each tag
font_sizes = []
for (font_size, count) in font_counts:
font_sizes.append(float(font_size))
font_sizes.sort(reverse=True)
# aggregating the tags for each font size
idx = 0
size_tag = {}
for size in font_sizes:
idx += 1
if size == p_size:
idx = 0
size_tag[size] = '<p>'
if size > p_size:
size_tag[size] = '<h{0}>'.format(idx)
elif size < p_size:
size_tag[size] = '<s{0}>'.format(idx)
return size_tag
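# Worked example (added; sizes are hypothetical): if the most frequent span size is 10.0
# (the paragraph size) and the document also uses 14.0, 12.0 and 8.5, the mapping is
# {14.0: '<h1>', 12.0: '<h2>', 10.0: '<p>', 8.5: '<s1>'} -- sizes larger than the paragraph
# size become numbered header tags, smaller ones become numbered <s*> tags.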
def get_pdf_raw_pages(doc, pages):
header_para = []
pageCounter = 0
for page in doc:
blocks = page.get_text("dict")["blocks"]
header_para.append(blocks)
pageCounter += 1
if pageCounter >= pages:
break
return header_para
def headers_para(doc, size_tag, pages=2):
"""Scrapes headers & paragraphs from PDF and return texts with element tags.
:param doc: PDF document to iterate through
:type doc: <class 'fitz.fitz.Document'>
:param size_tag: textual element tags for each size
:type size_tag: dict
:rtype: list
    :return: texts with prepended element tags
"""
header_para = [] # list with headers and paragraphs
first = True # boolean operator for first header
previous_s = {} # previous span
pageCounter = 0
for page in doc:
blocks = page.get_text("dict")["blocks"]
for b in blocks: # iterate through the text blocks
# header_para.append("<section_block>")
if b['type'] == 0: # this block contains text
# REMEMBER: multiple fonts and sizes are possible IN one block
block_string = "" # text found in block
for l in b["lines"]: # iterate through the text lines
for s in l["spans"]: # iterate through the text spans
if s['text'].strip(): # removing whitespaces:
if first:
previous_s = s
first = False
block_string = size_tag[s['size']] + s['text']
else:
if s['size'] == previous_s['size']:
if block_string and all((c == "|") for c in block_string):
# block_string only contains pipes
block_string = size_tag[s['size']] + s['text']
if block_string == "":
# new block has started, so append size tag
block_string = size_tag[s['size']] + s['text']
else: # in the same block, so concatenate strings
block_string += " " + s['text']
else:
header_para.append(block_string)
block_string = size_tag[s['size']] + s['text']
previous_s = s
# new block started, indicating with a pipe
block_string += "|"
# header_para.append("<text_block>")
header_para.append(block_string)
# header_para.append("<text_block_end>")
# header_para.append("<section_block_end>")
pageCounter += 1
if pageCounter >= pages:
break
return header_para
def get_pdf_first_page_txt(pdf_path, pages=2):
doc = fitz.open(pdf_path)
font_counts, styles = fonts(doc, granularity=False, pages=pages)
size_tag = font_tags(font_counts, styles)
return headers_para(doc, size_tag, pages)
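# Minimal usage sketch (added; the path and titles are hypothetical):
#   tagged_blocks = get_pdf_first_page_txt("./data/docs/example_article.pdf", pages=2)
# Each entry is a text block prefixed with its font-size tag, e.g. "<h1>Example Article Title|"
# for a title block or "<p>1. Introduction ..." for body text.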
def get_pdf_pages(pdf_path, pages=2):
docs = PyMuPDFLoader(pdf_path).load()
return docs[:pages]
# texts = []
# for doc in docs[:pages]:
# texts.append(doc.page_content)
# return texts
def get_pdf_page_metadata(pdf_path, pages):
pdf_first_page_txt = get_pdf_first_page_txt(pdf_path, pages)
template = """
I have extracted text from the initial pages of a Journal of Economic Literature (JEL) PDF file. I require assistance in extracting
specific details, namely: article title, author, abstract and keywords section. Please be aware that if you encounter
JEL classifications such as C12 and P34, kindly ignore them and refrain from including them in the abstract and keywords.
{format_instructions}
Wrap your final output as a json objects
INPUT:
{pdf_first_page_txt}
YOUR RESPONSE:
"""
response_schemas = [
ResponseSchema(name="title", description="extracted title"),
ResponseSchema(name="author", description="extracted authors seperated by comma"),
ResponseSchema(name="abstract", description="extracted abstract"),
ResponseSchema(name="keywords", description="extracted keywords")
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
prompt = ChatPromptTemplate(
messages=[
HumanMessagePromptTemplate.from_template(template)
],
input_variables=["pdf_first_page_txt"],
partial_variables={"format_instructions": output_parser.get_format_instructions()}
)
llm = ChatOpenAI(model_name='gpt-3.5-turbo-16k',temperature=0.0,max_tokens=6048) # type: ignore gpt-3.5-turbo
final_prompt = prompt.format_prompt(pdf_first_page_txt=pdf_first_page_txt)
output = llm(final_prompt.to_messages())
try:
result = output_parser.parse(output.content)
except:
if "```json" in output.content:
json_string = output.content.split("```json")[1].strip()
else:
json_string = output.content
result = fix_JSON(json_string)
head, tail = os.path.split(pdf_path)
result["filename"] = tail
return result
def get_pdf_page_accept_metadata(pdf_path, pages):
pdf_first_page_txt = get_pdf_first_page_txt(pdf_path, pages)
template = """
I have extracted text from the initial pages of a Journal of Economic Literature (JEL) PDF file.
I need help identifying the accepted date of the article. If the accepted date is not explicitly specified,
it should be located either at the top or bottom of the first or second page of the article in a date format without the prefix 'accepted'.
{format_instructions}
Wrap your final output as a json objects
INPUT:
{pdf_first_page_txt}
YOUR RESPONSE:
"""
response_schemas = [
ResponseSchema(name="accepted", description="extracted accepted date")
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
prompt = ChatPromptTemplate(
messages=[
HumanMessagePromptTemplate.from_template(template)
],
input_variables=["pdf_first_page_txt"],
partial_variables={"format_instructions": output_parser.get_format_instructions()}
)
llm = ChatOpenAI(model_name='gpt-3.5-turbo',temperature=0.0,max_tokens=148) # type: ignore gpt-3.5-turbo
final_prompt = prompt.format_prompt(pdf_first_page_txt=pdf_first_page_txt)
output = llm(final_prompt.to_messages())
try:
result = output_parser.parse(output.content)
except:
if "```json" in output.content:
json_string = output.content.split("```json")[1].strip()
else:
json_string = output.content
result = fix_JSON(json_string)
head, tail = os.path.split(pdf_path)
result["filename"] = tail
return result
def get_pdf_intro(pdf_path, pages):
pdf_first_page_txt = get_pdf_first_page_txt(pdf_path, pages)
template = """
I have extracted text from the initial pages of a Journal of Economic Literature (JEL) PDF file. I require assistance in extracting
introduction section. Typically, the introduction section begins after the abstract and ends before the next sub-title or section heading.
Wrap your final output as a json objects
INPUT:
{pdf_first_page_txt}
YOUR RESPONSE:
"""
response_schemas = [
ResponseSchema(name="introduction", description="extracted introduction")
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
prompt = ChatPromptTemplate(
messages=[
HumanMessagePromptTemplate.from_template(template)
],
input_variables=["pdf_first_page_txt"],
# partial_variables={"format_instructions": output_parser.get_format_instructions()}
)
llm = ChatOpenAI(model_name='gpt-3.5-turbo-16k',temperature=0.0,max_tokens=8396) # type: ignore gpt-3.5-turbo
final_prompt = prompt.format_prompt(pdf_first_page_txt=pdf_first_page_txt)
output = llm(final_prompt.to_messages())
try:
result = output_parser.parse(output.content)
except Exception as e:
print(str(e))
if "```json" in output.content:
json_string = output.content.split("```json")[1].strip()
else:
json_string = output.content
result = fix_JSON(json_string)
head, tail = os.path.split(pdf_path)
result["filename"] = tail
return result
def get_polish_intro(my_intro, sample_introes, words_limit, temperature):
template = """
I require an introduction for my Journal of Economic Literature and I would appreciate it \
if you could compose it for me around {words_limit} words. I would like the introduction mimic on the \
sample introductions that I will provide. If I have already provided my own introduction, \
please refine it accordingly.
% My own introduction: {my_intro}
% Sample introductions:
{sample_introes}
% End of sample introductions:
YOUR RESPONSE:
"""
response_schemas = [
ResponseSchema(name="introduction", description="refined introduction")
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
prompt = ChatPromptTemplate(
messages=[
HumanMessagePromptTemplate.from_template(template)
],
input_variables=["my_intro","sample_introes","words_limit"],
partial_variables={"format_instructions": output_parser.get_format_instructions()}
)
llm = ChatOpenAI(model_name='gpt-3.5-turbo',temperature=temperature,max_tokens=2048) # type: ignore gpt-3.5-turbo
final_prompt = prompt.format_prompt(my_intro=my_intro, sample_introes=sample_introes, words_limit=words_limit)
output = llm(final_prompt.to_messages())
result = output.content
return result
def fix_JSON(json_message=None):
result = None
try:
result = json.loads(json_message)
except Exception as e:
# Find the offending character index:
idx_to_replace = int(str(e).split(' ')[-1].replace(')', ''))
# Remove the offending character:
json_message = list(json_message)
json_message[idx_to_replace] = ' '
new_message = ''.join(json_message)
return fix_JSON(json_message=new_message)
return result
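# Worked example (added for clarity; the input string is hypothetical): a response with a raw
# newline inside a JSON string value fails json.loads with an error message ending in "(char N)";
# fix_JSON blanks position N and retries recursively until the string parses.
#   fix_JSON('{"introduction": "line one
# line two"}')   # (the value contains an actual newline)
#   # -> {'introduction': 'line one line two'}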
def save_pdfs_to_db(pdf_files, excel_file, meta_type='meta', pages=2):
if os.path.exists(excel_file):
df = pd.read_excel(excel_file)
existing_data = df.to_dict(orient='records')
else:
existing_data = []
existing_filenames = set(row['filename'] for row in existing_data)
for doc in pdf_files:
head, tail = os.path.split(doc)
if tail not in existing_filenames:
# print('get meta from LLM '+doc)
try:
if meta_type == 'intro':
metadata = get_pdf_intro2(doc, pages)
elif meta_type == 'date':
metadata = get_pdf_page_accept_metadata(doc, pages)
else:
metadata = get_pdf_page_metadata(doc, pages)
temp_data = []
temp_data.append(metadata)
save_to_excel(existing_data+temp_data, excel_file)
existing_data += temp_data
print("Data append to ", excel_file)
except Exception as e:
print(str(e))
def get_metadata_from_db(excel_file):
df = pd.read_excel(excel_file)
    records = df.to_dict(orient='records')
    return records
def get_column_from_db(excel_file, column):
df = pd.read_excel(excel_file)
doc = DataFrameLoader(df, column).load()
return doc
def get_data_from_csv(file_path, column_name, filter_value):
data = pd.read_csv(file_path, encoding = 'unicode_escape')
filtered_data = data[data[column_name] == filter_value]
dict_data = filtered_data.to_dict(orient='records') #filtered_data.values.tolist()
for row in dict_data:
md = ast.literal_eval(row["metadata"])
# print(type(md))
row["date"] = md["modDate"]
return dict_data
def get_filename_list(similar_dict, path):
filenames = []
for doc in similar_dict['context']:
filenames.append(os.path.join(path, doc.metadata['filename']))
return filenames
def save_to_excel(data, file_path):
df = pd.DataFrame(data)
df.to_excel(file_path, index=False)
def get_pdf_intro2(pdf_path, pages):
pdf_first_page_txt = get_pdf_first_page_txt(pdf_path, pages)
# pdf_first_page_txt = get_pdf_pages(pdf_path, pages)
human_template = """
I have extracted the text from the initial pages of a Journal of Economic Literature (JEL) PDF file. I require assistance in extracting the introduction section. Typically, the document follows a pattern where the 'abstract' header is encountered, followed by the abstract section. Subsequently, an 'Introduction' header is expected, which is followed by the introduction section. Next, there may be a 'Background' header or other headers indicating different sections. The introduction section generally concludes before the next sub-title or section heading appears, such as 'Background' or other similar headings.
Please continue searching for the introduction section until you reach a clear next sub-title or section heading. However, please note that if you encounter a bottom part between two pages, such as a section starting with 'RECEIVED:' followed by a date, it does not necessarily mean that the introduction section has ended. In such cases, you should continue searching on the next page.
If the text 'www.elsevier.com' appears in the beginning, it indicates that the literature is published on Elsevier and follows a specific format. In this case, the abstract section will start with "A B S T R A C T" and end before the introduction section. The introduction section will typically start with "1. Introduction" and end before the next section header, such as "2. Background". Please continue searching for the introduction section until you reach next section heading such as "2. Background", it has to be started with "2.".
Please provide the introduction section as the final output in JSON format with the key 'Introduction' written in Pascal case.
Exclude the content of the abstract section.
Only include the text within the introduction section and exclude any text prior to it.
INPUT: {pdf_first_page_txt}
YOUR RESPONSE:
"""
response_schemas = [
# ResponseSchema(name="abstract", description="extracted abstract"),
ResponseSchema(name="introduction", description="extracted introduction")
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
prompt = ChatPromptTemplate(
messages=[
HumanMessagePromptTemplate.from_template(human_template)
],
input_variables=["pdf_first_page_txt"]
)
llm = ChatOpenAI(model_name='gpt-3.5-turbo-16k',temperature=0.0,max_tokens=6658) # type: ignore gpt-3.5-turbo
final_prompt = prompt.format_prompt(pdf_first_page_txt=pdf_first_page_txt)
output = llm(final_prompt.to_messages())
try:
result = output_parser.parse(output.content)
except Exception as e:
print(str(e))
if "```json" in output.content:
json_string = output.content.split("```json")[1].strip()
else:
json_string = output.content
result = fix_JSON(json_string)
head, tail = os.path.split(pdf_path)
result["filename"] = tail
return result
def main():
documents = ['./data/docs/literature/Do people care about democracy_An experiment exploring the value of voting rights.pdf',
'./data/docs/literature/Expressive voting versus information avoidance_expenrimental evidence in the context of climate change mitigation.pdf',
'./data/docs/literature/Crashing the party_An experimental investigation of strategic voting in primary elections.pdf',
'./data/docs/literature/Economic growth and political extremism.pdf']
doc = './data/docs/literature_suicide/1-s2.0-S0304387821000432-main.pdf'
doc = './data/docs/literature_suicide/1-s2.0-S0047272721000761-main.pdf'
# doc = './data/docs/literature_suicide/rest_a_00777.pdf'
documents = ['./data/docs/literature/Do people care about democracy_An experiment exploring the value of voting rights.pdf'
,'./data/docs/literature/Expressive voting versus information avoidance_expenrimental evidence in the context of climate change mitigation.pdf'
,'./data/docs/literature/Economic growth and political extremism.pdf' ]
# './data/docs/literature/Expressive voting versus information avoidance_expenrimental evidence in the context of climate change mitigation.pdf',
# './data/docs/literature/Crashing the party_An experimental investigation of strategic voting in primary elections.pdf',
# './data/docs/literature/Economic growth and political extremism.pdf']
# save_pdfs_to_db(documents, intro_excel_file, is_Intro=True, pages=4)
metadata = get_pdf_intro2(doc, 2)
print(metadata)
# docs = get_pdf_first_page_txt(doc, 3)
# # docs = get_pdf_pages(doc, 2)
# # docs = get_pdf_raw_pages(doc, 2)
# print(docs)
# pdf_first_page_txt = get_pdf_first_page_txt(doc, 3)
# raw_txt = get_pdf_raw_pages(fitz.open(doc), 2)
# print(raw_txt)
# pdf_first_page_txt = get_pdf_first_page_txt(doc, 3)
# output_file = "data/db/repo_intro_4.xlsx"
# intro354_excel_file = "data/db/repo_intro_35_16.xlsx"
# save_pdfs_to_db(documents, intro354_excel_file, is_intro=True, pages=4)
# intros = [dict["introduction"] for dict in get_metadata_from_db(intro35_excel_file)]
# polish = get_polish_intro('', intros[:3], 600, 0)
# print(polish)
# csv_file = "./data/db/summary.csv"
# column_name = "Theme"
# filter_value = "China authoritarian system"
# data = get_data_from_csv(csv_file, column_name, filter_value)
# print(data)
if __name__ == '__main__':
main() | [
"sample_introes",
"words_limit",
"format_instructions",
"\n I have extracted text from the initial pages of a Journal of Economic Literature (JEL) PDF file. I require assistance in extracting \n specific details, namely: article title, author, abstract and keywords section. Please be aware that if you encounter \n JEL classifications such as C12 and P34, kindly ignore them and refrain from including them in the abstract and keywords. \n \n {format_instructions}\n\n Wrap your final output as a json objects\n\n INPUT:\n {pdf_first_page_txt}\n\n YOUR RESPONSE:\n ",
"\nI have extracted the text from the initial pages of a Journal of Economic Literature (JEL) PDF file. I require assistance in extracting the introduction section. Typically, the document follows a pattern where the 'abstract' header is encountered, followed by the abstract section. Subsequently, an 'Introduction' header is expected, which is followed by the introduction section. Next, there may be a 'Background' header or other headers indicating different sections. The introduction section generally concludes before the next sub-title or section heading appears, such as 'Background' or other similar headings.\n\nPlease continue searching for the introduction section until you reach a clear next sub-title or section heading. However, please note that if you encounter a bottom part between two pages, such as a section starting with 'RECEIVED:' followed by a date, it does not necessarily mean that the introduction section has ended. In such cases, you should continue searching on the next page.\n\nIf the text 'www.elsevier.com' appears in the beginning, it indicates that the literature is published on Elsevier and follows a specific format. In this case, the abstract section will start with \"A B S T R A C T\" and end before the introduction section. The introduction section will typically start with \"1. Introduction\" and end before the next section header, such as \"2. Background\". Please continue searching for the introduction section until you reach next section heading such as \"2. Background\", it has to be started with \"2.\".\n\nPlease provide the introduction section as the final output in JSON format with the key 'Introduction' written in Pascal case.\n\nExclude the content of the abstract section.\n\nOnly include the text within the introduction section and exclude any text prior to it.\n\nINPUT: {pdf_first_page_txt}\n\nYOUR RESPONSE:\n ",
"\n I have extracted text from the initial pages of a Journal of Economic Literature (JEL) PDF file. \n I need help identifying the accepted date of the article. If the accepted date is not explicitly specified, \n it should be located either at the top or bottom of the first or second page of the article in a date format without the prefix 'accepted'. \n \n {format_instructions}\n\n Wrap your final output as a json objects\n\n INPUT:\n {pdf_first_page_txt}\n\n YOUR RESPONSE:\n ",
"pdf_first_page_txt",
"\n I have extracted text from the initial pages of a Journal of Economic Literature (JEL) PDF file. I require assistance in extracting \n introduction section. Typically, the introduction section begins after the abstract and ends before the next sub-title or section heading. \n \n Wrap your final output as a json objects\n\n INPUT:\n {pdf_first_page_txt}\n\n YOUR RESPONSE:\n ",
"my_intro",
"\n I require an introduction for my Journal of Economic Literature and I would appreciate it if you could compose it for me around {words_limit} words. I would like the introduction mimic on the sample introductions that I will provide. If I have already provided my own introduction, please refine it accordingly. \n\n % My own introduction: {my_intro}\n\n % Sample introductions:\n {sample_introes}\n % End of sample introductions:\n\n YOUR RESPONSE:\n "
] |
2024-01-10 | Kororinpas/Lit_Tool | pdf_documents.py | from pdf_metadata import get_pdf_metadata
from pdf_metadata_llm import get_pdf_metadata_using_llm
def get_pdf_documents(pdf_files):
from langchain.document_loaders import PyMuPDFLoader,DirectoryLoader,UnstructuredPDFLoader
docs =[]
import re
for pdf_fullpath in pdf_files:
metadata = get_pdf_metadata(pdf_fullpath)
if metadata != 'None':
doc = PyMuPDFLoader(pdf_fullpath).load()
for element in doc:
element.metadata = metadata
element.page_content = re.sub('\n+',' ',element.page_content.strip())
docs.append(element)
else:
doc = PyMuPDFLoader(pdf_fullpath).load()
print(f"{pdf_fullpath} is not identified! Using other strategy!!")
metadata = get_pdf_metadata_using_llm(doc)
if metadata != 'None':
for element in doc:
element.metadata = metadata
for element in doc:
element.page_content = re.sub('\n+',' ',element.page_content.strip())
docs.append(element)
return docs | [] |
2024-01-10 | Kororinpas/Lit_Tool | pdf_metadata_llm.py | from doi import get_doi
from document_util import get_split_documents
def get_pdf_metadata_using_llm(doc):
import re
doc[0].page_content = re.sub('\n+',' ',doc[0].page_content.strip())
# from langchain.text_splitter import RecursiveCharacterTextSplitter
# text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500,chunk_overlap = 50)
split_docs = get_split_documents(doc, 1500, 50)
abstract = split_docs[0]
doi = get_doi(abstract)
if doi != 'None':
import habanero
import time
citation = habanero.cn.content_negotiation(ids = doi,format='bibentry')
time.sleep(5)
import bibtexparser
citation = bibtexparser.loads(citation)
citation = citation.entries[0]
metadata = {'author':citation['author'],
'year':citation['year'],
'title':citation['title'],
'journal':citation['journal'],
}
return metadata
else:
metadata = 'None'
return metadata | [] |
2024-01-10 | Kororinpas/Lit_Tool | cosine_match.py | def search_cosine_similarity(query,split_docs,embeddings): ##query-str,split_docs-list,embeddings-embeddings()
split_docs_content = [content['content'] for content in split_docs]
embed_docs = embeddings.embed_documents(split_docs_content)
embed_query= embeddings.embed_query(query)
from openai.embeddings_utils import cosine_similarity
cos_index = []
for embed_doc in embed_docs:
cos_index.append(cosine_similarity(embed_doc,embed_query))
    # Build an index over the documents, ordered by similarity score
    idx = sorted(range(len(cos_index)),key=lambda k:cos_index[k]) # sort indices by cosine similarity (ascending), best matches last
final_similar_list = []
for index in idx[-3:]:
unit = {}
unit['sentences']=split_docs_content[index]
unit['source']=split_docs[index]['source']
unit['score']=cos_index[index]
final_similar_list.append(unit)
return final_similar_list | [] |
2024-01-10 | Kororinpas/Lit_Tool | embedding_function.py | def get_embedding_function():
from langchain.embeddings import HuggingFaceEmbeddings
import torch
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model_name = "sentence-transformers/all-mpnet-base-v2"
model_kwargs = {'device':device}
return HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs) | [] |
2024-01-10 | Kororinpas/Lit_Tool | doi.py | def get_doi(abstract):
from kor.extraction import create_extraction_chain
from kor.nodes import Object, Text, Number
from langchain.chat_models import ChatOpenAI
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0) # type: ignore
schema = Object(
id="doi",
description="doi is a digital identifier.It typically starts with 10. followed by a numeric prefix, such as 10.1000/182.",
attributes=[
Text(
id="doi",
description='doi is a digital identifier. It typically starts with "10." followed by a numeric prefix, such as 10.1000/182.',
examples=[
('American Economic Journal: Economic Policy 2015, 7(4): 223–242 http://dx.doi.org/10.1257/pol.20130367 223 Water Pollution Progress at Borders: The','http://dx.doi.org/10.1257/pol.20130367'),
('Environment and Development Economics (2020), 1–17 doi:10.1017/S1355770X2000025X EDE RESEARCH ARTICLE Political incentives, Party Congress, and pollution cycle: empirical evidence from China Zhihua Tian,1 and Yanfang Tian2* 1School of Economics, Zhejiang University of Technology, Hangzhou','10.1017/S1355770X2000025X')
],
many=True
)
],
many=False
)
chain = create_extraction_chain(llm, schema, encoder_or_encoder_class='json')
output = chain.predict_and_parse(text=abstract.page_content)
if 'doi' not in output['data']:
print(f"LLM strategy failed!!{abstract.metadata['source']} Please manually add it!!")
source = 'None'
return source
else:
doi = output['data']['doi']['doi'][0]
if 'doi=' in doi:
doi = doi.split('doi=')[1]
return doi
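# Usage sketch (added; illustrative only): `abstract` is expected to be a langchain Document
# chunk, e.g. the first split produced in pdf_metadata_llm.get_pdf_metadata_using_llm:
#   doi = get_doi(split_docs[0])
#   # -> e.g. '10.1257/pol.20130367', or 'None' when no DOI can be extracted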
| [] |
2024-01-10 | jied-O/Jids-Garage | langchainagentstest.py | from langchain import OpenAI
from langchain.chains import LLMChain
from langchain.chains import PALChain
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.agents import load_tools
from ogbujipt.config import openai_emulation
from ogbujipt.model_style.alpaca import prep_instru_inputs, ALPACA_PROMPT_TMPL
from langchain.prompts import PromptTemplate
openai_emulation(host="http://192.168.0.73", port="8000")
def simpleWordPrompt():
prompt = PromptTemplate(
input_variables=["place"],
template="What is the capital of {place}?",
)
print(prompt.format(place="Nigeria"))
llm = OpenAI(temperature=0.1)
llmchain = LLMChain(llm=llm, prompt=prompt)
response = llmchain.run(place="Nigeria")
print(response)
def MathWorldProblem():
llm = OpenAI(temperature=0.1)
palchain = PALChain.from_math_prompt(llm=llm, verbose=True)
response = palchain.run(
"If my age is half of my dad's age and he is going to be 60 next year, what is my current age?"
)
print(response)
def agentTest():
llm = OpenAI(temperature=0)
tools = load_tools(["pal-math"], llm=llm)
agent = initialize_agent(tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True)
agent.run("If my age is half of my dad's age and he is going to be 60 next year, what is my current age?")
def main():
MathWorldProblem()
if __name__ == "__main__":
main() | [
"What is the capital of {place}?"
] |
2024-01-10 | tarunsamanta2k20/quivr | backend~parsers~audio.py | import os
import tempfile
import time
from io import BytesIO
from tempfile import NamedTemporaryFile
import openai
from fastapi import UploadFile
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from utils import compute_sha1_from_content, documents_vector_store
# # Create a function to transcribe audio using Whisper
# def _transcribe_audio(api_key, audio_file, stats_db):
# openai.api_key = api_key
# transcript = ""
# with BytesIO(audio_file.read()) as audio_bytes:
# # Get the extension of the uploaded file
# file_extension = os.path.splitext(audio_file.name)[-1]
# # Create a temporary file with the uploaded audio data and the correct extension
# with tempfile.NamedTemporaryFile(delete=True, suffix=file_extension) as temp_audio_file:
# temp_audio_file.write(audio_bytes.read())
# temp_audio_file.seek(0) # Move the file pointer to the beginning of the file
# transcript = openai.Audio.translate("whisper-1", temp_audio_file)
# return transcript
async def process_audio(upload_file: UploadFile, stats_db):
file_sha = ""
dateshort = time.strftime("%Y%m%d-%H%M%S")
file_meta_name = f"audiotranscript_{dateshort}.txt"
# uploaded file to file object
openai_api_key = os.environ.get("OPENAI_API_KEY")
# Here, we're writing the uploaded file to a temporary file, so we can use it with your existing code.
with tempfile.NamedTemporaryFile(delete=False, suffix=upload_file.filename) as tmp_file:
await upload_file.seek(0)
content = await upload_file.read()
tmp_file.write(content)
tmp_file.flush()
tmp_file.close()
with open(tmp_file.name, "rb") as audio_file:
transcript = openai.Audio.transcribe("whisper-1", audio_file)
file_sha = compute_sha1_from_content(transcript.text.encode("utf-8"))
file_size = len(transcript.text.encode("utf-8"))
# Load chunk size and overlap from sidebar
chunk_size = 500
chunk_overlap = 0
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    texts = text_splitter.split_text(transcript.text)  # split the transcribed text, not the Whisper response object
docs_with_metadata = [Document(page_content=text, metadata={"file_sha1": file_sha, "file_size": file_size, "file_name": file_meta_name,
"chunk_size": chunk_size, "chunk_overlap": chunk_overlap, "date": dateshort}) for text in texts]
# if st.secrets.self_hosted == "false":
# add_usage(stats_db, "embedding", "audio", metadata={"file_name": file_meta_name,"file_type": ".txt", "chunk_size": chunk_size, "chunk_overlap": chunk_overlap})
documents_vector_store.add_documents(docs_with_metadata)
return documents_vector_store
| [] |
2024-01-10 | sshh12/llm_optimize | llm_optimize~optimize.py | from typing import Callable, Optional, Tuple, List
import re
import openai
from langchain.input import print_text
from langchain.prompts.chat import (
SystemMessage,
HumanMessage,
AIMessage,
)
from llm_optimize import llm, constants
# The numeric score and the LLM-facing representation
ScoreTuple = Tuple[float, str]
# Best score, history of scores, best x0
OptimizationResultTuple = Tuple[float, List[float], str]
def run(
task_description: str,
task_question: str,
func: Callable[[str], ScoreTuple],
x0: str,
max_steps: Optional[int] = 10,
model: Optional[llm.LLMModel] = None,
verbose: Optional[bool] = True,
system_prompt: Optional[str] = constants.SYSTEM_PROMPT,
human_prompt: Optional[str] = constants.HUMAN_OPTIMIZATION_PROMPT,
stop_score: Optional[float] = None,
) -> OptimizationResultTuple:
if model is None:
model = llm.get_default_llm()
def _log(text: str, color: str):
if verbose:
print_text(text + "\n", color)
x = x0
score, fx = func(x)
best_score = score
best_x = x
_log(x, "blue")
_log(fx, "green")
messages = [
SystemMessage(content=system_prompt.format(task_description=task_description)),
HumanMessage(content=human_prompt.format(task_question=task_question, x=x, fx=fx)),
]
score_hist = [score]
for _ in range(max_steps):
try:
resp = model(messages).content
except openai.error.InvalidRequestError as e:
_log(str(e), "red")
# drop the first set of results to reduce token usage
messages.pop(1)
messages.pop(1)
resp = model(messages).content
_log(resp, "yellow")
try:
x = re.findall("```(?:\w+)?([\s\S]+)```", resp)[0]
except IndexError as e:
_log(f"Stopping early, failed to parse response. {e}", "red")
break
_log(x, "blue")
score, fx = func(x)
score_hist.append(score)
if score > best_score:
best_x = x
best_score = score
_log(fx, "green")
messages.append(AIMessage(content=resp))
messages.append(HumanMessage(content=human_prompt.format(task_question=task_question, x=x, fx=fx)))
if stop_score is not None and best_score >= stop_score:
break
return (best_score, score_hist, best_x)
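# Usage sketch (added; the task strings and scoring function are hypothetical):
#   def score_fn(x: str) -> ScoreTuple:
#       score = -abs(len(x) - 280)               # toy objective: prefer ~280-character answers
#       return score, f"length={len(x)}, score={score}"
#
#   best_score, history, best_x = run(
#       task_description="Write a tweet-length product pitch.",
#       task_question="Improve the pitch.",
#       func=score_fn,
#       x0="Our app saves you time.",
#       max_steps=5,
#   )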
| [] |
2024-01-10 | xiahan4956/Auto_Claude_100k | autogpt~llm~api_manager.py | from __future__ import annotations
from typing import List, Optional
import openai
from openai import Model
from autogpt.config import Config
from autogpt.llm.base import CompletionModelInfo, MessageDict
from autogpt.llm.providers.openai import OPEN_AI_MODELS
from autogpt.logs import logger
from autogpt.singleton import Singleton
class ApiManager(metaclass=Singleton):
def __init__(self):
self.total_prompt_tokens = 0
self.total_completion_tokens = 0
self.total_cost = 0
self.total_budget = 0
self.models: Optional[list[Model]] = None
def reset(self):
self.total_prompt_tokens = 0
self.total_completion_tokens = 0
self.total_cost = 0
self.total_budget = 0.0
self.models = None
def create_chat_completion(
self,
messages: list[MessageDict],
model: str | None = None,
temperature: float = None,
max_tokens: int | None = None,
deployment_id=None,
):
"""
Create a chat completion and update the cost.
Args:
messages (list): The list of messages to send to the API.
model (str): The model to use for the API call.
temperature (float): The temperature to use for the API call.
max_tokens (int): The maximum number of tokens for the API call.
Returns:
str: The AI's response.
"""
cfg = Config()
if temperature is None:
temperature = cfg.temperature
if deployment_id is not None:
response = openai.ChatCompletion.create(
deployment_id=deployment_id,
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
api_key=cfg.openai_api_key,
)
else:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
api_key=cfg.openai_api_key,
)
if not hasattr(response, "error"):
logger.debug(f"Response: {response}")
prompt_tokens = response.usage.prompt_tokens
completion_tokens = response.usage.completion_tokens
self.update_cost(prompt_tokens, completion_tokens, model)
return response
def update_cost(self, prompt_tokens, completion_tokens, model: str):
"""
Update the total cost, prompt tokens, and completion tokens.
Args:
prompt_tokens (int): The number of tokens used in the prompt.
completion_tokens (int): The number of tokens used in the completion.
model (str): The model used for the API call.
"""
# the .model property in API responses can contain version suffixes like -v2
model = model[:-3] if model.endswith("-v2") else model
model_info = OPEN_AI_MODELS[model]
self.total_prompt_tokens += prompt_tokens
self.total_completion_tokens += completion_tokens
self.total_cost += prompt_tokens * model_info.prompt_token_cost / 1000
if issubclass(type(model_info), CompletionModelInfo):
self.total_cost += (
completion_tokens * model_info.completion_token_cost / 1000
)
logger.debug(f"Total running cost: ${self.total_cost:.3f}")
def set_total_budget(self, total_budget):
"""
Sets the total user-defined budget for API calls.
Args:
total_budget (float): The total budget for API calls.
"""
self.total_budget = total_budget
def get_total_prompt_tokens(self):
"""
Get the total number of prompt tokens.
Returns:
int: The total number of prompt tokens.
"""
return self.total_prompt_tokens
def get_total_completion_tokens(self):
"""
Get the total number of completion tokens.
Returns:
int: The total number of completion tokens.
"""
return self.total_completion_tokens
def get_total_cost(self):
"""
Get the total cost of API calls.
Returns:
float: The total cost of API calls.
"""
return self.total_cost
def get_total_budget(self):
"""
Get the total user-defined budget for API calls.
Returns:
float: The total budget for API calls.
"""
return self.total_budget
def get_models(self) -> List[Model]:
"""
Get list of available GPT models.
Returns:
list: List of available GPT models.
"""
if self.models is None:
all_models = openai.Model.list()["data"]
self.models = [model for model in all_models if "gpt" in model["id"]]
return self.models
| [] |
2024-01-10 | xiahan4956/Auto_Claude_100k | autogpt~llm~utils~claude.py | from autogpt.config import Config
import time
import openai
import json
CFG = Config()
openai.api_key = CFG.openai_api_key
MAX_TOKEN_ONCE = 100000
CONTINUE_PROMPT = "... continue"
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
def _sendReq(anthropic, prompt, max_tokens_to_sample):
print("----------------request----------------")
print(prompt)
print("----------------request----------------\n")
print("the input words of claude: "+str(len(prompt)))
for _ in range(5):
try:
response = anthropic.completions.create(
prompt=prompt,
stop_sequences = [HUMAN_PROMPT, AI_PROMPT],
model="claude-2",
max_tokens_to_sample=max_tokens_to_sample,
temperature = 0.3
)
break
except Exception as e:
print(e)
time.sleep(1)
return response
def sendReq(question, max_tokens_to_sample: int = MAX_TOKEN_ONCE):
anthropic = Anthropic(api_key = CFG.claude_api_key)
prompt = f"{question} {anthropic.AI_PROMPT}"
response = _sendReq(anthropic, prompt, max_tokens_to_sample)
data = response.completion
return data
def pmt_gpt_to_claude(question):
question = str(question)[1:-1]
question = question.replace("{\'role\': \'system\', \'content\':","\n\nSYSTEM:")
question = question.replace("{\'role\': \'user\', \'content\':","\n\nHuman:")
question = question.replace("{\'role\': \'assistant\', \'content\':","\n\nAssistant:")
question = question.replace("\'}","")
return question
def fix_claude_json(claude_resp):
messages = [{"role":"system","content":r"1. You will receive a JSON string, and your task is to extract information from it and return it as a JSON object. 2.Use function's json schema to extrct.Please notice the format 3. Be aware that the given JSON may contain errors, so you may need to infer the fields and the format from the JSON string. 4.Do not use \" and \' .you should use ' " },{"role": "user", "content": claude_resp}]
functions = [
{
"name": "parse_claude_json",
"description": "parse a claude response to the json",
"parameters": {
"type": "object",
"properties": {
"thoughts": {
"type": "object",
"properties": {
"text": {
"type": "string",
"description": "thoughts"
},
"reasoning": {
"type": "string"
},
"plan": {
"type": "string",
"description": "it is a string,not list.If you find it is list,please use correct it "
},
"criticism": {
"type": "string",
"description": "constructive self-criticism"
},
"speak": {
"type": "string",
"description": "thoughts summary to say to user"
}
},
"required": ["text", "reasoning", "plan", "criticism", "speak"],
},
"command": {
"type": "object",
"properties": {
"name": {"type": "string"},
"args": {
"type": "object"
}
},
"required": ["name", "args"],
}
},
"required": ["thoughts", "command"],
},
},
]
resp_json = claude_resp
for _ in range(5):
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=messages,
functions=functions,
max_tokens=3000,
temperature=0.0,
)
resp_json = response["choices"][0]["message"]["function_call"]["arguments"]
break
except Exception as e:
time.sleep(1)
print(e)
# fix the plan
try:
resp_json = json.loads(resp_json)
resp_json["thoughts"]["plan"] = str(resp_json["thoughts"]["plan"]).replace("[","").replace("]","")
resp_json = json.dumps(resp_json)
except Exception as e:
print(e)
return resp_json
| [
"f\"{question} {anthropic.AI_PROMPT}",
"1. You will receive a JSON string, and your task is to extract information from it and return it as a JSON object. 2.Use function's json schema to extrct.Please notice the format 3. Be aware that the given JSON may contain errors, so you may need to infer the fields and the format from the JSON string. 4.Do not use \\\" and \\' .you should use ' ",
"... continue"
] |
2024-01-10 | pkrack/asp | asp~ppo_patched.py | import warnings
from typing import Any, Dict, Optional, Type, TypeVar, Union
import numpy as np
import torch as th
from gymnasium import spaces
from stable_baselines3.common.on_policy_algorithm import OnPolicyAlgorithm
from stable_baselines3.common.policies import ActorCriticCnnPolicy, ActorCriticPolicy, BasePolicy, MultiInputActorCriticPolicy
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import explained_variance, get_schedule_fn
from torch.nn import functional as F
SelfPPO = TypeVar("SelfPPO", bound="PPO")
class PPO(OnPolicyAlgorithm):
"""
Proximal Policy Optimization algorithm (PPO) (clip version)
Paper: https://arxiv.org/abs/1707.06347
Code: This implementation borrows code from OpenAI Spinning Up (https://github.com/openai/spinningup/)
https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail and
Stable Baselines (PPO2 from https://github.com/hill-a/stable-baselines)
Introduction to PPO: https://spinningup.openai.com/en/latest/algorithms/ppo.html
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: The learning rate, it can be a function
of the current progress remaining (from 1 to 0)
:param n_steps: The number of steps to run for each environment per update
(i.e. rollout buffer size is n_steps * n_envs where n_envs is number of environment copies running in parallel)
NOTE: n_steps * n_envs must be greater than 1 (because of the advantage normalization)
See https://github.com/pytorch/pytorch/issues/29372
:param batch_size: Minibatch size
    :param n_epochs: Number of epochs when optimizing the surrogate loss
:param gamma: Discount factor
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
:param clip_range: Clipping parameter, it can be a function of the current progress
remaining (from 1 to 0).
:param clip_range_vf: Clipping parameter for the value function,
it can be a function of the current progress remaining (from 1 to 0).
This is a parameter specific to the OpenAI implementation. If None is passed (default),
no clipping will be done on the value function.
IMPORTANT: this clipping depends on the reward scaling.
:param normalize_advantage: Whether to normalize or not the advantage
:param ent_coef: Entropy coefficient for the loss calculation
:param vf_coef: Value function coefficient for the loss calculation
:param max_grad_norm: The maximum value for the gradient clipping
:param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param target_kl: Limit the KL divergence between updates,
because the clipping is not enough to prevent large update
see issue #213 (cf https://github.com/hill-a/stable-baselines/issues/213)
By default, there is no limit on the kl div.
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for
debug messages
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
policy_aliases: Dict[str, Type[BasePolicy]] = {
"MlpPolicy": ActorCriticPolicy,
"CnnPolicy": ActorCriticCnnPolicy,
"MultiInputPolicy": MultiInputActorCriticPolicy,
}
def __init__(
self,
policy: Union[str, Type[ActorCriticPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 3e-4,
n_steps: int = 2048,
batch_size: int = 64,
n_epochs: int = 10,
gamma: float = 0.99,
gae_lambda: float = 0.95,
clip_range: Union[float, Schedule] = 0.2,
clip_range_vf: Union[None, float, Schedule] = None,
normalize_advantage: bool = True,
ent_coef: float = 0.0,
vf_coef: float = 0.5,
max_grad_norm: float = 0.5,
use_sde: bool = False,
sde_sample_freq: int = -1,
target_kl: Optional[float] = None,
tensorboard_log: Optional[str] = None,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super().__init__(
policy,
env,
learning_rate=learning_rate,
n_steps=n_steps,
gamma=gamma,
gae_lambda=gae_lambda,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
use_sde=use_sde,
sde_sample_freq=sde_sample_freq,
tensorboard_log=tensorboard_log,
policy_kwargs=policy_kwargs,
verbose=verbose,
device=device,
seed=seed,
_init_setup_model=False,
supported_action_spaces=(
spaces.Box,
spaces.Discrete,
spaces.MultiDiscrete,
spaces.MultiBinary,
),
)
# Sanity check, otherwise it will lead to noisy gradient and NaN
# because of the advantage normalization
if normalize_advantage:
assert (
batch_size > 1
), "`batch_size` must be greater than 1. See https://github.com/DLR-RM/stable-baselines3/issues/440"
if self.env is not None:
# Check that `n_steps * n_envs > 1` to avoid NaN
# when doing advantage normalization
buffer_size = self.env.num_envs * self.n_steps
assert buffer_size > 1 or (
not normalize_advantage
), f"`n_steps * n_envs` must be greater than 1. Currently n_steps={self.n_steps} and n_envs={self.env.num_envs}"
# Check that the rollout buffer size is a multiple of the mini-batch size
untruncated_batches = buffer_size // batch_size
if buffer_size % batch_size > 0:
warnings.warn(
f"You have specified a mini-batch size of {batch_size},"
f" but because the `RolloutBuffer` is of size `n_steps * n_envs = {buffer_size}`,"
f" after every {untruncated_batches} untruncated mini-batches,"
f" there will be a truncated mini-batch of size {buffer_size % batch_size}\n"
f"We recommend using a `batch_size` that is a factor of `n_steps * n_envs`.\n"
f"Info: (n_steps={self.n_steps} and n_envs={self.env.num_envs})"
)
self.batch_size = batch_size
self.n_epochs = n_epochs
self.clip_range = clip_range
self.clip_range_vf = clip_range_vf
self.normalize_advantage = normalize_advantage
self.target_kl = target_kl
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super()._setup_model()
# Initialize schedules for policy/value clipping
self.clip_range = get_schedule_fn(self.clip_range)
if self.clip_range_vf is not None:
if isinstance(self.clip_range_vf, (float, int)):
assert self.clip_range_vf > 0, "`clip_range_vf` must be positive, " "pass `None` to deactivate vf clipping"
self.clip_range_vf = get_schedule_fn(self.clip_range_vf)
def train(self) -> None:
"""
Update policy using the currently gathered rollout buffer.
"""
# Switch to train mode (this affects batch norm / dropout)
self.policy.set_training_mode(True)
# Update optimizer learning rate
self._update_learning_rate(self.policy.optimizer)
# Compute current clip range
clip_range = self.clip_range(self._current_progress_remaining)
# Optional: clip range for the value function
if self.clip_range_vf is not None:
clip_range_vf = self.clip_range_vf(self._current_progress_remaining)
entropy_losses = []
pg_losses, value_losses = [], []
clip_fractions = []
continue_training = True
loss = None
for epoch in range(self.n_epochs):
approx_kl_divs = []
# Do a complete pass on the rollout buffer
for rollout_data, bc_data in self.rollout_buffer.get(self.batch_size):
# Re-sample the noise matrix because the log_std has changed
if self.use_sde:
self.policy.reset_noise(self.batch_size)
if bc_data is None:
bc_loss = th.zeros(1, device=self.device)
else:
_, log_probs, _ = self.policy.evaluate_actions(bc_data.obs, bc_data.action)
ratio = th.exp(log_probs - bc_data.log_prob)
bc_loss = -th.mean(th.clamp(ratio, 1 - clip_range, 1 + clip_range)).to(self.device)
if rollout_data is not None:
actions = rollout_data.actions
if isinstance(self.action_space, spaces.Discrete):
# Convert discrete action from float to long
actions = rollout_data.actions.long().flatten()
values, log_prob, entropy = self.policy.evaluate_actions(rollout_data.observations, actions)
values = values.flatten()
# Normalize advantage
advantages = rollout_data.advantages
# Normalization does not make sense if mini batchsize == 1, see GH issue #325
if self.normalize_advantage and len(advantages) > 1:
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
# ratio between old and new policy, should be one at the first iteration
ratio = th.exp(log_prob - rollout_data.old_log_prob)
# clipped surrogate loss
policy_loss_1 = advantages * ratio
policy_loss_2 = advantages * th.clamp(ratio, 1 - clip_range, 1 + clip_range)
policy_loss = -th.min(policy_loss_1, policy_loss_2).mean()
# Logging
pg_losses.append(policy_loss.item())
clip_fraction = th.mean((th.abs(ratio - 1) > clip_range).float()).item()
clip_fractions.append(clip_fraction)
if self.clip_range_vf is None:
# No clipping
values_pred = values
else:
# Clip the difference between old and new value
# NOTE: this depends on the reward scaling
values_pred = rollout_data.old_values + th.clamp(
values - rollout_data.old_values, -clip_range_vf, clip_range_vf
)
# Value loss using the TD(gae_lambda) target
value_loss = F.mse_loss(rollout_data.returns, values_pred)
value_losses.append(value_loss.item())
# Entropy loss favor exploration
if entropy is None:
# Approximate entropy when no analytical form
entropy_loss = -th.mean(-log_prob)
else:
entropy_loss = -th.mean(entropy)
entropy_losses.append(entropy_loss.item())
# Calculate approximate form of reverse KL Divergence for early stopping
# see issue #417: https://github.com/DLR-RM/stable-baselines3/issues/417
# and discussion in PR #419: https://github.com/DLR-RM/stable-baselines3/pull/419
# and Schulman blog: http://joschu.net/blog/kl-approx.html
with th.no_grad():
log_ratio = log_prob - rollout_data.old_log_prob
approx_kl_div = th.mean((th.exp(log_ratio) - 1) - log_ratio).cpu().numpy()
approx_kl_divs.append(approx_kl_div)
if self.target_kl is not None and approx_kl_div > 1.5 * self.target_kl:
continue_training = False
if self.verbose >= 1:
print(f"Early stopping at step {epoch} due to reaching max kl: {approx_kl_div:.2f}")
break
loss = (policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss) + self.bc_coef * bc_loss
else:
loss = bc_loss
# Optimization step
self.policy.optimizer.zero_grad()
loss.backward()
# Clip grad norm
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.policy.optimizer.step()
if not continue_training:
break
if loss:
self._n_updates += self.n_epochs
explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten())
# Logs
if loss:
self.logger.record("train/entropy_loss", np.mean(entropy_losses))
self.logger.record("train/policy_gradient_loss", np.mean(pg_losses))
self.logger.record("train/value_loss", np.mean(value_losses))
self.logger.record("train/approx_kl", np.mean(approx_kl_divs))
self.logger.record("train/clip_fraction", np.mean(clip_fractions))
self.logger.record("train/loss", loss.item())
self.logger.record("train/explained_variance", explained_var)
if hasattr(self.policy, "log_std"):
self.logger.record("train/std", th.exp(self.policy.log_std).mean().item())
self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
self.logger.record("train/clip_range", clip_range)
if self.clip_range_vf is not None:
self.logger.record("train/clip_range_vf", clip_range_vf)
else:
self.logger.info("No valid goals in the batch, skipping update")
def learn(
self: SelfPPO,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 1,
tb_log_name: str = "PPO",
reset_num_timesteps: bool = True,
progress_bar: bool = False,
) -> SelfPPO:
return super().learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
tb_log_name=tb_log_name,
reset_num_timesteps=reset_num_timesteps,
progress_bar=progress_bar,
)
| [] |
2024-01-10 | jongio/chat-with-your-data-solution-accelerator | backend~utilities~orchestrator~Strategies.py | from enum import Enum
class OrchestrationStrategy(Enum):
OPENAI_FUNCTION = 'openai_function'
LANGCHAIN = 'langchain'
def get_orchestrator(orchestration_strategy: str):
if orchestration_strategy == OrchestrationStrategy.OPENAI_FUNCTION.value:
from .OpenAIFunctions import OpenAIFunctionsOrchestrator
return OpenAIFunctionsOrchestrator()
elif orchestration_strategy == OrchestrationStrategy.LANGCHAIN.value:
from .LangChainAgent import LangChainAgent
return LangChainAgent()
else:
raise Exception(f"Unknown orchestration strategy: {orchestration_strategy}")
| [] |
2024-01-10 | jongio/chat-with-your-data-solution-accelerator | backend~utilities~document_chunking~Layout.py | from typing import List
from .DocumentChunkingBase import DocumentChunkingBase
from langchain.text_splitter import MarkdownTextSplitter
from .Strategies import ChunkingSettings
from ..common.SourceDocument import SourceDocument
class LayoutDocumentChunking(DocumentChunkingBase):
def __init__(self) -> None:
pass
def chunk(self, documents: List[SourceDocument], chunking: ChunkingSettings) -> List[SourceDocument]:
full_document_content = "".join(list(map(lambda document: document.content, documents)))
document_url = documents[0].source
splitter = MarkdownTextSplitter.from_tiktoken_encoder(chunk_size=chunking.chunk_size, chunk_overlap=chunking.chunk_overlap)
chunked_content_list = splitter.split_text(full_document_content)
# Create document for each chunk
documents = []
chunk_offset = 0
for idx, chunked_content in enumerate(chunked_content_list):
documents.append(
SourceDocument.from_metadata(
content=chunked_content,
document_url=document_url,
metadata={"offset": chunk_offset},
idx=idx,
)
)
chunk_offset += len(chunked_content)
return documents
| [] |
2024-01-10 | jongio/chat-with-your-data-solution-accelerator | backend~utilities~helpers~LLMHelper.py | import openai
from typing import List
from langchain.chat_models import AzureChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from .EnvHelper import EnvHelper
class LLMHelper:
def __init__(self):
env_helper: EnvHelper = EnvHelper()
# Configure OpenAI API
openai.api_type = "azure"
openai.api_version = env_helper.AZURE_OPENAI_API_VERSION
openai.api_base = env_helper.OPENAI_API_BASE
openai.api_key = env_helper.OPENAI_API_KEY
self.llm_model = env_helper.AZURE_OPENAI_MODEL
self.llm_max_tokens = env_helper.AZURE_OPENAI_MAX_TOKENS if env_helper.AZURE_OPENAI_MAX_TOKENS != '' else None
self.embedding_model = env_helper.AZURE_OPENAI_EMBEDDING_MODEL
def get_llm(self):
return AzureChatOpenAI(deployment_name=self.llm_model, temperature=0, max_tokens=self.llm_max_tokens, openai_api_version=openai.api_version)
# TODO: This needs to have a custom callback to stream back to the UI
def get_streaming_llm(self):
return AzureChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler], deployment_name=self.llm_model, temperature=0,
max_tokens=self.llm_max_tokens, openai_api_version=openai.api_version)
def get_embedding_model(self):
return OpenAIEmbeddings(deployment=self.embedding_model, chunk_size=1)
def get_chat_completion_with_functions(self, messages: List[dict], functions: List[dict], function_call: str="auto"):
return openai.ChatCompletion.create(
deployment_id=self.llm_model,
messages=messages,
functions=functions,
function_call=function_call,
)
def get_chat_completion(self, messages: List[dict]):
return openai.ChatCompletion.create(
deployment_id=self.llm_model,
messages=messages,
)
| [] |
2024-01-10 | pcc2k00/HousingPriceTrend | HousingPriceTrendMetaphor.py | import openai
import yaml
from metaphor_python import Metaphor
with open("pass.yml") as f:
content = f.read()
my_credentials = yaml.load(content, Loader=yaml.FullLoader)
openai.api_key = my_credentials["openAi"]
metaphor = Metaphor(my_credentials["metaphor"])
USER_QUESTION = "Recent housing price in Seattle"
SYSTEM_MESSAGE = "You are a helpful assistant that generates search queiries based on user questions. Only generate one search query."
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": SYSTEM_MESSAGE},
{"role": "user", "content": USER_QUESTION},
],
)
query = completion.choices[0].message.content
search_response = metaphor.search(
query, use_autoprompt=True, start_published_date="2023-07-01"
)
contents_result = search_response.get_contents()
first_result = contents_result.contents[0]
SYSTEM_MESSAGE = "You are a helpful assistant that summarizes the content of a webpage. Summarize the users input."
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": SYSTEM_MESSAGE},
{"role": "user", "content": first_result.extract},
],
)
summary = completion.choices[0].message.content
print(f"Summary for {first_result.title}: {summary}")
| [] |
2024-01-10 | romain-cambonie/openxcom-mod-generator | src~chat~ask_for_visual_proposition.py | from openai import OpenAI
from openai.types.chat import ChatCompletion
def ask_for_concept_art(
client: OpenAI,
character_story: str,
art_style_description: str,
) -> str:
system_prompt = (
"Generate a comprehensive and vivid visual concept art of a character for a piece of artwork. "
"The character should fit within a distinct theme and style, and the description must be detailed enough to guide an "
"artist in creating a dynamic and engaging image."
"Here are the guidelines for your description:"
"Theme and Setting: Choose an intriguing theme and setting for the character. It could be anything from a dystopian "
"future to a fantasy world. "
"Describe the setting in a way that complements the character's story and personality."
"Character Details:"
"Physical Appearance: Provide a detailed description of the character's physical features, including hair, eyes, "
"skin, and build."
"Expression and Posture: Convey the character's mood or personality through their expression and posture."
"Attire and Equipment: Describe the character's clothing and any distinctive equipment they might carry, "
"do NOT use proper noun, describe visually what the items look like."
f"Artistic Style: Specify the desired artistic style for the portrayal. The starting point is : "
f"{art_style_description}, make sure to detail the stylistic elements that should be emphasized."
"Composition and Color Palette: Suggest a striking composition for the artwork"
"Describe the character stance"
"Describe the color palette, considering how colors can reflect the character's traits or the mood of the setting."
"Extract up to 8 keys focusing on the art style and composition"
"Use these guidelines to create a structured and detailed visual description for a character based on the following "
"origin story:"
"Focus on making the description as vivid and detailed as possible, so it can easily be translated into a stunning "
"piece of art."
""
"An example of a good concept art result:"
"Keys: Commanding presence, Dynamic composition, Low angle perspective, Cold metallic shades, Warm leather tones, "
"Dramatic lighting, Cyberpunk aesthetic"
"Character Details: She is light-skinned with a muscular build, short blonde hair, and piercing light-colored eyes "
"that radiate intelligence and cunning. Her expression is one of chilling neutrality, a reflection of her spirit "
"shaped by the cold, ruthless Arctic."
"Attire and Equipment: Her attire combines functionality with a touch of brutality – a sleek, black chest armor that "
"bulges with the strength of her physique, complemented by large shoulder pads. Her arms are covered with highly "
"detailed armor, and her legs are clad in thigh-high boots with sturdy knee pads. Fortified gloves adorn her hands. "
"In one hand, she deftly holds a leather whip, an emblem of elegance and cruelty, while her other hand grips a robust "
"submachine gun. Around her waist are vials containing clear liquid and spherical objects reminiscent of primitive "
"grenades, adding to her enigmatic persona. A handle and a battle axe, symbols of her defiance and skill, "
"are fastened at her side."
"Setting: The backdrop is a post-apocalyptic Arctic tundra, subtly hinting at her origins. The environment should be "
"bleak yet captivating, with remnants of a once-thriving world now lost to chaos and rebellion."
"Artistic Style and Composition: The portrait should capture her commanding presence amidst this desolate backdrop. "
"The composition should be dynamic, focusing on her from a slightly low angle to emphasize her dominance. The color "
"palette should be a blend of cold metallic shades and warmer tones from her leather armor, creating a vivid contrast "
"that underscores her determination and grit. The lighting should be dramatic, highlighting her features and the "
"textures of her gear, enhancing the overall cyberpunk aesthetic."
)
user_prompt = f"Character story: {character_story}"
response: ChatCompletion = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
)
return str(response.choices[0].message.content)
| [
"Generate a comprehensive and vivid visual concept art of a character for a piece of artwork. The character should fit within a distinct theme and style, and the description must be detailed enough to guide an artist in creating a dynamic and engaging image.Here are the guidelines for your description:Theme and Setting: Choose an intriguing theme and setting for the character. It could be anything from a dystopian future to a fantasy world. Describe the setting in a way that complements the character's story and personality.Character Details:Physical Appearance: Provide a detailed description of the character's physical features, including hair, eyes, skin, and build.Expression and Posture: Convey the character's mood or personality through their expression and posture.Attire and Equipment: Describe the character's clothing and any distinctive equipment they might carry, do NOT use proper noun, describe visually what the items look like.Artistic Style: Specify the desired artistic style for the portrayal. The starting point is : PLACEHOLDER, make sure to detail the stylistic elements that should be emphasized.Composition and Color Palette: Suggest a striking composition for the artworkDescribe the character stanceDescribe the color palette, considering how colors can reflect the character's traits or the mood of the setting.Extract up to 8 keys focusing on the art style and compositionUse these guidelines to create a structured and detailed visual description for a character based on the following origin story:Focus on making the description as vivid and detailed as possible, so it can easily be translated into a stunning piece of art.An example of a good concept art result:Keys: Commanding presence, Dynamic composition, Low angle perspective, Cold metallic shades, Warm leather tones, Dramatic lighting, Cyberpunk aestheticCharacter Details: She is light-skinned with a muscular build, short blonde hair, and piercing light-colored eyes that radiate intelligence and cunning. Her expression is one of chilling neutrality, a reflection of her spirit shaped by the cold, ruthless Arctic.Attire and Equipment: Her attire combines functionality with a touch of brutality – a sleek, black chest armor that bulges with the strength of her physique, complemented by large shoulder pads. Her arms are covered with highly detailed armor, and her legs are clad in thigh-high boots with sturdy knee pads. Fortified gloves adorn her hands. In one hand, she deftly holds a leather whip, an emblem of elegance and cruelty, while her other hand grips a robust submachine gun. Around her waist are vials containing clear liquid and spherical objects reminiscent of primitive grenades, adding to her enigmatic persona. A handle and a battle axe, symbols of her defiance and skill, are fastened at her side.Setting: The backdrop is a post-apocalyptic Arctic tundra, subtly hinting at her origins. The environment should be bleak yet captivating, with remnants of a once-thriving world now lost to chaos and rebellion.Artistic Style and Composition: The portrait should capture her commanding presence amidst this desolate backdrop. The composition should be dynamic, focusing on her from a slightly low angle to emphasize her dominance. The color palette should be a blend of cold metallic shades and warmer tones from her leather armor, creating a vivid contrast that underscores her determination and grit. The lighting should be dramatic, highlighting her features and the textures of her gear, enhancing the overall cyberpunk aesthetic.",
"Character story: PLACEHOLDER"
] |
2024-01-10 | romain-cambonie/openxcom-mod-generator | src~dalle~call_dalle_and_save_image.py | import requests
from openai import OpenAI
from pathlib import Path
from typing import Optional
from openai.types import ImagesResponse
def call_dalle_and_save_image(prompt: str, client: OpenAI, output_file_path: Path) -> Optional[Path]:
try:
# Generate image using OpenAI client
response: ImagesResponse = client.images.generate(
prompt=prompt, n=1, model="dall-e-3", size="1024x1024", quality="hd", response_format="url"
)
# Extract the image URL
image_url = response.data[0].url
if not image_url:
print("No image URL found in the response.")
return None
print(image_url)
# Download the image
image_response = requests.get(image_url)
if image_response.status_code == 200:
# Write the image data to a file
with open(output_file_path, "wb") as file:
file.write(image_response.content)
return output_file_path
else:
print(f"Error downloading image: {image_response.status_code}")
return None
except Exception as e:
print(f"An error occurred: {e}")
return None
| [] |
2024-01-10 | romain-cambonie/openxcom-mod-generator | src~chat~ask_for_dalle_character_prompt.py | from openai import OpenAI
from openai.types.chat import ChatCompletion
def ask_for_dalle_character_prompt(
client: OpenAI,
concept_art_description: str,
) -> str:
system_prompt = (
"You're given a detailed concept art description of a character. Your task is to condense this description into a "
"succinct, vivid DALL-E prompt."
"The DALL-E prompt should accurately capture the key visual elements and artistic style described in the concept art, "
"while being concise enough for effective image generation. "
"Here is the concept art description to be transformed into a DALL-E prompt:\n"
f"{concept_art_description}\n"
"Based on this description, refine this concept into a DALL-E prompt that contains, in order references to the art "
"style, composition, subject, location, colors;"
"The prompt must not be more than 130 words, encapsulating the essence of the concept art."
f"The prompt must start with the keys of the concept art"
)
user_prompt = "Transform the above concept art description into a succinct DALL-E prompt."
response: ChatCompletion = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
)
return str(response.choices[0].message.content)
| [
"Transform the above concept art description into a succinct DALL-E prompt.",
"You're given a detailed concept art description of a character. Your task is to condense this description into a succinct, vivid DALL-E prompt.The DALL-E prompt should accurately capture the key visual elements and artistic style described in the concept art, while being concise enough for effective image generation. Here is the concept art description to be transformed into a DALL-E prompt:\nPLACEHOLDER\nBased on this description, refine this concept into a DALL-E prompt that contains, in order references to the art style, composition, subject, location, colors;The prompt must not be more than 130 words, encapsulating the essence of the concept art.The prompt must start with the keys of the concept art"
] |
2024-01-10 | romain-cambonie/openxcom-mod-generator | src~chat~ask_for_origin_story.py | from openai import OpenAI
from openai.types.chat import ChatCompletion
def ask_for_origin_story(
client: OpenAI,
character_name: str,
equipment_description: str,
appearance_description: str,
) -> str:
system_prompt = (
"You are tasked with creating a short origin story for a fictional character. "
"You will receive three key pieces of information: (1) the character's name, "
"(2) a YAML payload detailing the character's equipment, and "
"(3) an image that shows some characteristics of the character's appearance. "
"Your job is to weave these elements together into a compelling and imaginative origin story. "
"The story should be concise, no more than a few paragraphs, and should creatively incorporate specific details from "
"the YAML payload and the visual cues from the image. "
"The tone and style of the story should align with the genre suggested by the character's name and appearance. "
"Be imaginative and ensure that the equipment and visual traits play a significant role in the character's background "
"and the events that shaped them."
"Pay special attention to match all visual description details such as gender, race, skin color, hair color and so on "
)
user_prompt = (
f"Character Name: {character_name}\n\nEquipment: {equipment_description}\n\nAppearance: "
f"{appearance_description}\n\nBased on the above details, create a short origin story for the character."
)
response: ChatCompletion = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
)
return str(response.choices[0].message.content)
| [
"You are tasked with creating a short origin story for a fictional character. You will receive three key pieces of information: (1) the character's name, (2) a YAML payload detailing the character's equipment, and (3) an image that shows some characteristics of the character's appearance. Your job is to weave these elements together into a compelling and imaginative origin story. The story should be concise, no more than a few paragraphs, and should creatively incorporate specific details from the YAML payload and the visual cues from the image. The tone and style of the story should align with the genre suggested by the character's name and appearance. Be imaginative and ensure that the equipment and visual traits play a significant role in the character's background and the events that shaped them.Pay special attention to match all visual description details such as gender, race, skin color, hair color and so on ",
"Character Name: PLACEHOLDER\n\nEquipment: PLACEHOLDER\n\nAppearance: PLACEHOLDER\n\nBased on the above details, create a short origin story for the character."
] |
2024-01-10 | outlines-dev/outlines | outlines~models~__init__.py | """Module that contains all the models integrated in outlines.
We group the models in submodules by provider instead of theme (completion, chat
completion, diffusers, etc.) and use routing functions everywhere else in the
codebase.
"""
from .awq import awq
from .exllamav2 import exl2
from .gptq import gptq
from .llamacpp import LlamaCpp, llamacpp
from .mamba import Mamba, mamba
from .openai import OpenAI, openai
from .transformers import Transformer, transformers
| [] |
2024-01-10 | ball2004244/Pinecone-Hackathon-23-Backend | logic~pinecone_db.py | '''
This file contains the logic for storing and querying data from Pinecone.
'''
from typing import List
from langchain.vectorstores import Pinecone
from langchain.chains.summarize import load_summarize_chain
from langchain.llms import GooglePalm
from langchain.embeddings.google_palm import GooglePalmEmbeddings
from langchain.schema import Document
import pinecone
from pinecone import DescribeIndexStatsResponse
class PineconeTrainer:
def __init__(self, gcp_api_key: str, pinecone_api_key: str, pinecone_environment: str):
self.gcp_api_key = gcp_api_key
self.pinecone_api_key = pinecone_api_key
self.pinecone_environment = pinecone_environment
self.palm_config = {
'temperature': 0.7,
'google_api_key': self.gcp_api_key,
}
self.index_name = 'paragraph-summarizer'
self.llm = GooglePalm(**self.palm_config)
self.chain = load_summarize_chain(self.llm, chain_type='stuff')
self.embeddings = GooglePalmEmbeddings(**self.palm_config)
self.pinecone_init(self.index_name, 'cosine', 768)
def pinecone_init(self, index_name: str, metric: str, dimension: int) -> None:
pinecone.init(
api_key=self.pinecone_api_key,
environment=self.pinecone_environment,
)
# check if index exists
if index_name not in pinecone.list_indexes():
pinecone.create_index(name=index_name, metric=metric, dimension=dimension)
self.index = pinecone.Index(index_name=index_name)
self.vectordb = Pinecone(index=self.index, embedding_function=self.embeddings.embed_query, text_key='text')
def add_data(self, input_list: List[str]=[]) -> None:
document_list = [Document(page_content=input_list[i]) for i in range(len(input_list))]
self.vectordb = Pinecone.from_documents(document_list, embedding=self.embeddings, index_name=self.index_name)
print('Data added successfully!, %s vectors added' % len(input_list))
def delete_all_data(self) -> None:
pass
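# Editor's note: delete_all_data() is left as a stub above. A minimal sketch, assuming the
# pinecone-client Index.delete(delete_all=True) API, would be:
#
#     def delete_all_data(self) -> None:
#         self.index.delete(delete_all=True)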
def query(self, query: str=' ', question: str='Summarize in 3 sentences') -> str:
search = self.vectordb.similarity_search(query=query, k=3)
summary = self.chain.run(input_documents=search, question=question)
return summary
def get_index_info(self) -> DescribeIndexStatsResponse:
index = pinecone.GRPCIndex(self.index_name)
output = index.describe_index_stats()
return output
def embed_text(self, text: str) -> List[float]:
return self.embeddings.embed_query(text)
def pinecone_train(self, input_file: str) -> None:
try:
input_list = self.extract_input_text(input_file)
self.add_data(input_list)
except Exception as e:
print(e)
@staticmethod
def extract_input_text(input_file: str) -> List[str]:
from logic.data_extract import extract_data, extract_text
data = extract_data(input_file)
texts = extract_text(data)
return texts
@staticmethod
def extract_output_text(input_file: str) -> List[str]:
from logic.data_extract import extract_data, extract_output_text
data = extract_data(input_file)
texts = extract_output_text(data)
return texts
if __name__ == '__main__':
pass | [] |
2024-01-10 | TheoKanning/crossword | crossword~clues.py | import json
import os
import openai
def convert_raw_clues(raw_filename, output_filename):
"""
Reads raw clue info from raw_filename, formats it to match GPT-3's fine-tune input, and writes it to output_filename
Raw clues are formatted like "Up in the air : ALOFT"
"""
with open(output_filename, "w+") as f_out:
f_out.write("farts")
with open(raw_filename, "r") as f_in:
with open(output_filename, "w+") as f_out:
for line in f_in.readlines():
line = line.strip()
if not line:
continue
if line.isnumeric():
# This line is a clue number, ignore it
continue
if line.lower() == "down" or line.lower() == "across":
continue
components = line.rsplit(
":", 1
) # split from end to handle colons inside clues
if len(components) != 2:
print(line)
continue
clue = components[0].strip()
answer = components[1].strip()
f_out.write(
json.dumps(
{
"prompt": f"Answer: {answer.lower()}\nClue:",
"completion": f" {clue}\n",
}
)
)
f_out.write("\n")
def get_clue(answer):
prompt = f"Answer: {answer.lower()}\nClue:"
openai.api_key = os.getenv("OPENAI_API_KEY")
result = openai.Completion.create(
model="curie:ft-personal-2022-04-30-18-38-57", prompt=prompt, stop="\n", n=5
)
print(f"Answer: {answer}\nClues:")
for choice in result["choices"]:
print(choice["text"])
if __name__ == "__main__":
get_clue("")
# convert_raw_clues("../clues/raw_clues.txt", "../clues/formatted.jsonl")
| [
"f\"Answer: {answer.lower()}\\nClue:"
] |
2024-01-10 | NusretOzates/langchain_retrieval_qa_bot | data_loaders.py | import re
from itertools import chain
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders import PyPDFLoader, TextLoader, UnstructuredURLLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import DocArrayInMemorySearch
from langchain.vectorstores.base import VectorStoreRetriever
def load_text_file(file_path: str) -> Document:
"""Loads a text file and returns a Document object.
Args:
file_path: Path to the text file.
Returns:
A Document object.
"""
doc = TextLoader(file_path, encoding="utf-8").load()[0]
return doc
def load_pdf_file(file_path: str) -> List[Document]:
"""Loads a pdf file and returns a list of Document objects.
Args:
file_path: Path to the pdf file.
Returns:
A list of Document objects. Every page in the pdf file is a Document object.
"""
loader = PyPDFLoader(file_path)
docs = loader.load()
return docs
def load_website(url: str) -> List[Document]:
"""Loads a website and returns a Document object.
Args:
url: Url of the website.
Returns:
A Document object.
"""
documents = UnstructuredURLLoader(
[url],
mode="elements",
headers={
"ssl_verify": "False",
},
).load()
processed_docs = []
# We are not rich, we need to eliminate some of the elements
for doc in documents:
# This will make us lose table information sorry about that :(
if doc.metadata.get("category") not in [
"NarrativeText",
"UncategorizedText",
"Title",
]:
continue
# Remove elements with empty links, they are mostly recommended articles etc.
if doc.metadata.get("links"):
link = doc.metadata["links"][0]["text"]
if link is None:
continue
link = link.replace(" ", "").replace("\n", "")
if len(link.split()) == 0:
continue
# Remove titles with links, they are mostly table of contents or navigation links
if doc.metadata.get("category") == "Title" and doc.metadata.get("links"):
continue
# Remove extra spaces
doc.page_content = re.sub(" +", " ", doc.page_content)
# Remove docs with less than 3 words
if len(doc.page_content.split()) < 3:
continue
processed_docs.append(doc)
# Instead of splitting element-wise, we merge all the elements and split them in chunks
merged_docs = "\n".join([doc.page_content for doc in processed_docs])
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
processed_docs = splitter.split_text(merged_docs)
processed_docs = [
Document(page_content=doc, metadata={"url": url}) for doc in processed_docs
]
return processed_docs
def load_text_files(file_paths: List[str]) -> List[Document]:
"""Loads a list of text files and returns a list of Document objects.
Args:
file_paths: List of paths to the text files.
Returns:
A list of Document objects.
"""
docs = [load_text_file(file_path) for file_path in file_paths]
return docs
def load_pdf_files(file_paths: List[str]) -> List[Document]:
"""Loads a list of pdf files and returns a list of Document objects.
Args:
file_paths: List of paths to the pdf files.
Returns:
A list of Document objects. Every page in the pdf file is a Document object.
"""
docs = [load_pdf_file(file_path) for file_path in file_paths]
docs = list(chain.from_iterable(docs))
return docs
def create_index(docs: List[Document]) -> VectorStoreRetriever:
"""Creates a vectorstore index from a list of Document objects.
Args:
docs: List of Document objects.
Returns:
A vectorstore index. It searches the most similar document to the given query but with
the help of MMR it also tries to find the most diverse document to the given query.
"""
index = VectorstoreIndexCreator(
vectorstore_cls=DocArrayInMemorySearch,
text_splitter=RecursiveCharacterTextSplitter(
chunk_size=1000, chunk_overlap=100
),
).from_documents(docs)
return index.vectorstore.as_retriever(search_type="mmr")
| [] |
2024-01-10 | Antrozhuk/telegramChatGPTBot | src~telegram_bot.py | import telegram.constants as constants
from telegram import Update
from telegram.ext import ApplicationBuilder, ContextTypes, CommandHandler, MessageHandler, filters
from src.openai_helper import OpenAIHelper
from src.logger import Logger
class ChatGPT3TelegramBot:
"""
Class representing a Chat-GPT3 Telegram Bot.
"""
def __init__(self, config: dict, openai: OpenAIHelper):
"""
Initializes the bot with the configuration and GPT-3 settings.
:param config: Dictionary with the bot configuration
:param openai: OpenAIHelper object
:param disallowed_message: Message shown when access is denied
"""
self.config = config
self.openai = openai
self.logger = Logger('telegram_bot').get_logger()
self.disallowed_message = "Вибачте, але вам не дозволено користуватись цим ботом."
async def start(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Shows the start message.
"""
if await self.disallowed(update, context):
return
await update.message.reply_text("Привіт! Я бот, який відповідає на ваші повідомлення за допомогою ChatGPT-3.\n"
"Якщо ви хочете дізнатись більше про мене, введіть /help\n\n",
disable_web_page_preview=True)
async def help(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Shows the help message.
"""
if await self.disallowed(update, context):
return
await update.message.reply_text("[Будь яке повідомлення] - Відправляє ваше повідомлення до AI\n"
"/help - Меню помічника\n"
"/random_answer - Генерує рандомну відповідь\n"
"/random_post - Генерує рандомний пост\n"
"/reset - Оновлює бесіду\n\n",
disable_web_page_preview=True)
async def reset(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Resets the conversation.
"""
if await self.disallowed(update, context):
return
self.logger.info(f'Resetting the conversation for {update.message.from_user}...')
chat_id = update.effective_chat.id
self.openai.reset_chat_history(chat_id=chat_id)
await context.bot.send_message(chat_id=chat_id, text='Готово!')
async def prompt(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
React to incoming messages and respond accordingly.
"""
if await self.disallowed(update, context):
return
self.logger.info(f'New message "{update.message.text}" received from {update.message.from_user}')
chat_id = update.effective_chat.id
await context.bot.send_chat_action(chat_id=chat_id, action=constants.ChatAction.TYPING)
response = self.openai.get_chat_response(chat_id=chat_id, query=update.message.text)
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.id,
parse_mode=constants.ParseMode.MARKDOWN,
text=response
)
async def random_answer(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Sends a random answer.
"""
if await self.disallowed(update, context):
return
self.logger.info(f'random_answer command received from {update.message.from_user}')
chat_id = update.effective_chat.id
await context.bot.send_chat_action(chat_id=chat_id, action=constants.ChatAction.TYPING)
response = self.openai.get_chat_response(chat_id=chat_id, query='напиши рандомну відповідь')
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.id,
parse_mode=constants.ParseMode.MARKDOWN,
text=response
)
async def random_post(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Sends a random post.
"""
if await self.disallowed(update, context):
return
self.logger.info(f'random_post command received from {update.message.from_user}')
chat_id = update.effective_chat.id
await context.bot.send_chat_action(chat_id=chat_id, action=constants.ChatAction.TYPING)
response = self.openai.get_chat_response(chat_id=chat_id, query='напиши рандомний пост українською')
await context.bot.send_message(
chat_id=chat_id,
parse_mode=constants.ParseMode.MARKDOWN,
text=response
)
async def disallowed(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Sends the access-denied message to the user.
"""
if not await self.is_allowed(update):
self.logger.warning(f'User {update.message.from_user} is not allowed to use the bot')
await context.bot.send_message(
chat_id=update.effective_chat.id,
text=self.disallowed_message,
disable_web_page_preview=True
)
return True
return False
async def error_handler(self, update: object, context: ContextTypes.DEFAULT_TYPE) -> None:
"""
Catches all errors.
"""
self.logger.debug(f'Exception while handling an update: {context.error}')
async def is_allowed(self, update: Update) -> bool:
"""
Checks whether the user is allowed to use this bot.
"""
if self.config['allowed_user_ids'] == '*':
return True
allowed_user_ids = self.config['allowed_user_ids'].split(',')
if str(update.message.from_user.id) in allowed_user_ids:
return True
return False
def run(self):
"""
Runs the bot until the user presses Ctrl+C
"""
application = ApplicationBuilder().token(self.config['token']).build()
application.add_handler(CommandHandler('start', self.start))
application.add_handler(CommandHandler('help', self.help))
application.add_handler(CommandHandler('reset', self.reset))
application.add_handler(CommandHandler('random_answer', self.random_answer))
application.add_handler(CommandHandler('random_post', self.random_post))
application.add_handler(MessageHandler(filters.TEXT & (~filters.COMMAND), self.prompt))
application.add_error_handler(self.error_handler)
application.run_polling()
| [] |
2024-01-10 | aws-samples/aurora-postgresql-pgvector | DAT303~02_QuestionAndAnswering~rag_app.py | # Import libraries
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain.vectorstores.pgvector import PGVector
from langchain.memory import ConversationSummaryBufferMemory
from langchain.chains import ConversationalRetrievalChain
from htmlTemplates import css
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import BedrockEmbeddings
from langchain.llms import Bedrock
from langchain.prompts import PromptTemplate
import streamlit as st
import boto3
from PIL import Image
import os
import traceback
# TODO: This function takes a list of PDF documents as input and extracts the text from them using PdfReader.
# It concatenates the extracted text and returns it.
# TODO: Given the extracted text, this function splits it into smaller chunks using the RecursiveCharacterTextSplitter module.
# The chunk size, overlap, and other parameters are configured to optimize processing efficiency.
# TODO: This function takes the text chunks as input and creates a vector store using Bedrock Embeddings (Titan) and pgvector.
# The vector store stores the vector representations of the text chunks, enabling efficient retrieval based on semantic similarity.
# TODO: In this function, a conversation chain is created using the conversational AI model (Anthropic's Claude v2), vector store (created in the previous function), and conversation memory (ConversationSummaryBufferMemory).
# This chain allows the Gen AI app to engage in conversational interactions.
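# Editor's note: the four helpers described by the TODOs above are called from main() but are
# not defined in this snippet. The functions below are a minimal, assumption-based sketch
# (Titan embeddings, Claude v2 on Bedrock, pgvector); the collection name, model ids and
# model_kwargs are illustrative assumptions, not taken from the original file.
def get_pdf_text(pdf_docs):
    # Extract and concatenate the text of every page of every uploaded PDF.
    text = ""
    for pdf in pdf_docs:
        reader = PdfReader(pdf)
        for page in reader.pages:
            text += page.extract_text() or ""
    return text

def get_text_chunks(raw_text):
    # Split the raw text into overlapping chunks sized for embedding.
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    return splitter.split_text(raw_text)

def get_vectorstore(text_chunks):
    # Embed the chunks with Bedrock Titan and store them in Aurora PostgreSQL via pgvector.
    embeddings = BedrockEmbeddings(client=BEDROCK_CLIENT, model_id="amazon.titan-embed-text-v1")
    if text_chunks is None:
        # No new documents uploaded yet: connect to the existing collection.
        return PGVector(connection_string=CONNECTION_STRING,
                        embedding_function=embeddings,
                        collection_name="genai_qa_collection")
    return PGVector.from_texts(texts=text_chunks,
                               embedding=embeddings,
                               connection_string=CONNECTION_STRING,
                               collection_name="genai_qa_collection")

def get_conversation_chain(vectorstore):
    # Claude v2 via Bedrock with summarizing buffer memory, wrapped in a retrieval chain.
    llm = Bedrock(model_id="anthropic.claude-v2", client=BEDROCK_CLIENT,
                  model_kwargs={"max_tokens_to_sample": 1024, "temperature": 0.5})
    memory = ConversationSummaryBufferMemory(llm=llm, memory_key="chat_history",
                                             return_messages=True)
    return ConversationalRetrievalChain.from_llm(llm=llm,
                                                 retriever=vectorstore.as_retriever(),
                                                 memory=memory)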
# This function is responsible for processing the user's input question and generating a response from the chatbot
def handle_userinput(user_question):
if "chat_history" not in st.session_state:
st.session_state.chat_history = None
if "messages" not in st.session_state:
st.session_state.messages = []
try:
response = st.session_state.conversation({'question': user_question})
except ValueError:
st.write("Sorry, I didn't understand that. Could you rephrase your question?")
print(traceback.format_exc())
return
st.session_state.chat_history = response['chat_history']
for i, message in enumerate(st.session_state.chat_history):
if i % 2 == 0:
st.success(message.content, icon="🤔")
else:
st.write(message.content)
# Streamlit components
def main():
# Set the page configuration for the Streamlit application, including the page title and icon.
st.set_page_config(page_title="Generative AI Q&A with Amazon Bedrock, Aurora PostgreSQL and pgvector",
layout="wide",
page_icon=":books::parrot:")
st.write(css, unsafe_allow_html=True)
logo_url = "static/Powered-By_logo-stack_RGB_REV.png"
st.sidebar.image(logo_url, width=150)
st.sidebar.markdown(
"""
### Instructions:
1. Browse and upload PDF files
2. Click Process
3. Type your question in the search bar to get more insights
"""
)
# Check if the conversation and chat history are not present in the session state and initialize them to None.
if "conversation" not in st.session_state:
st.session_state.conversation = get_conversation_chain(get_vectorstore(None))
if "chat_history" not in st.session_state:
st.session_state.chat_history = None
# A header with the text appears at the top of the Streamlit application.
st.header("Generative AI Q&A with Amazon Bedrock, Aurora PostgreSQL and pgvector :books::parrot:")
subheader = '<p style="font-family:Calibri (Body); color:Grey; font-size: 16px;">Leverage Foundational Models from <a href="https://aws.amazon.com/bedrock/">Amazon Bedrock</a> and <a href="https://github.com/pgvector/pgvector">pgvector</a> as Vector Engine</p>'
# Write the CSS style to the Streamlit application, allowing you to customize the appearance.
st.markdown(subheader, unsafe_allow_html=True)
image = Image.open("static/RAG_APG.png")
st.image(image, caption='Generative AI Q&A with Amazon Bedrock, Aurora PostgreSQL and pgvector')
# Create a text input box where you can ask questions about your documents.
user_question = st.text_input("Ask a question about your documents:", placeholder="What is Amazon Aurora?")
# Define a Go button for user action
go_button = st.button("Submit", type="secondary")
# If the go button is pressed or the user enters a question, it calls the handle_userinput() function to process the user's input.
if go_button or user_question:
with st.spinner("Processing..."):
handle_userinput(user_question)
with st.sidebar:
st.subheader("Your documents")
pdf_docs = st.file_uploader(
"Upload your PDFs here and click on 'Process'", type="pdf", accept_multiple_files=True)
# If the user clicks the "Process" button, the following code is executed:
# i. raw_text = get_pdf_text(pdf_docs): retrieves the text content from the uploaded PDF documents.
# ii. text_chunks = get_text_chunks(raw_text): splits the text content into smaller chunks for efficient processing.
# iii. vectorstore = get_vectorstore(text_chunks): creates a vector store that stores the vector representations of the text chunks.
if st.button("Process"):
with st.spinner("Processing"):
# get pdf text
raw_text = get_pdf_text(pdf_docs)
# get the text chunks
text_chunks = get_text_chunks(raw_text)
# create vector store
vectorstore = get_vectorstore(text_chunks)
# create conversation chain
st.session_state.conversation = get_conversation_chain(vectorstore)
st.success('PDF uploaded successfully!', icon="✅")
with st.sidebar:
st.divider()
st.sidebar.markdown(
"""
### Sample questions to get started:
1. What is Amazon Aurora?
2. How can I migrate from PostgreSQL to Aurora and the other way around?
3. What does "three times the performance of PostgreSQL" mean?
4. What is Aurora Standard and Aurora I/O-Optimized?
5. How do I scale the compute resources associated with my Amazon Aurora DB Instance?
6. How does Amazon Aurora improve my databases fault tolerance to disk failures?
7. How does Aurora improve recovery time after a database crash?
8. How can I improve upon the availability of a single Amazon Aurora database?
"""
)
if __name__ == '__main__':
# This function loads the environment variables from a .env file.
load_dotenv()
# Define the Bedrock client.
BEDROCK_CLIENT = boto3.client("bedrock-runtime", 'us-west-2')
# Create the connection string for pgvector from .env file.
CONNECTION_STRING = PGVector.connection_string_from_db_params(
driver = os.environ.get("PGVECTOR_DRIVER"),
user = os.environ.get("PGVECTOR_USER"),
password = os.environ.get("PGVECTOR_PASSWORD"),
host = os.environ.get("PGVECTOR_HOST"),
port = os.environ.get("PGVECTOR_PORT"),
database = os.environ.get("PGVECTOR_DATABASE")
)
main()
| [] |
2024-01-10 | WuQingYi20/InteractiveStory | wsgi.py | from flask import Flask, render_template, jsonify, request
import openai
import re
from prompts import prompts
from dotenv import load_dotenv
import os
# Load the .env file
load_dotenv()
app = Flask(__name__)
initialCall = True
currentDescription = ""
# Initialize OpenAI API with your API key
openai.api_key = os.getenv('OPENAI_API_KEY')
# Define a dictionary to store user progress data
user_data = {}
# Global variable to track initialization status
initialized = False
@app.route('/')
def index():
global initialized
global currentDescription
if initialized:
# Initialization has already been done, return JSON response
if request.headers.get('X-Requested-With') == 'XMLHttpRequest':
return jsonify(story=user_data['story'], choices=user_data['choices'])
# Initialization has already been done, return HTML response
else:
return render_template('index.html', story=user_data['story'], choices=user_data['choices'])
else:
# Initialization code
systemRoleAuto = prompts['index']['System']
promptStory = prompts['index']['story']
storyResponse = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": f"{systemRoleAuto}"},
{"role": "user", "content": f"{promptStory}"},
#{"role": "assistant", "content": f"{contentAssistant}"},
],
max_tokens= 1500,
)
story = storyResponse.choices[0].message['content']
currentDescription = story
choicesPrompt = prompts['index']['choices']
choiceResponse = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": f"{systemRoleAuto}"},
{"role": "user", "content": f"{story} {choicesPrompt}"},
#{"role": "assistant", "content": f"{contentAssistant}"},
],
max_tokens= 1500,
)
#Insert <p> tags around each paragraph
formatted_story = format_story(story)
user_data['story'] = formatted_story
user_data['choices'] = choiceResponse.choices[0].message['content']
initialized = True
if request.headers.get('X-Requested-With') == 'XMLHttpRequest':
return jsonify(story=story, choices=user_data['choices'])
else:
return render_template('index.html', story=story, choices=user_data['choices'])
# Define a route to handle user choices and update the story
@app.route('/next-page/<choice>')
def next_page(choice):
systemRoleAuto = prompts['next-page']['System']
originalStory = user_data['story'] + "\n" + choice
contentAssistant = prompts['next-page']['storyAssistant']
contentAssistantChoices = prompts['next-page']['choicesAssistant']
prompt_story = originalStory + "\n" + prompts['next-page']['story']
response_story = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": f"{systemRoleAuto}"},
{"role": "user", "content": f"{prompt_story}"},
{"role": "assistant", "content": f"{contentAssistant}"},
],
max_tokens= 1500,
)
prompt_choices = originalStory + response_story.choices[0].message['content'] + "\n" + prompts['next-page']['choices']
response_choices = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": f"{systemRoleAuto}"},
{"role": "user", "content": f"{prompt_choices}"},
{"role": "assistant", "content": f"{contentAssistantChoices}"},
],
max_tokens= 1500,
)
story = response_story.choices[0].message['content']
choices = response_choices.choices[0].message['content']
# get summary of previous story and actions by gpt-3.5-turbo and original story
prompt_summary = originalStory + "\n" + prompts['next-page']['summary']
response_summary = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": f"{systemRoleAuto}"},
{"role": "user", "content": f"{prompt_summary}"},
#{"role": "assistant", "content": f"{contentAssistant}"},
],
max_tokens= 1500,
)
formatted_story = format_story(story)
user_data['story'] = formatted_story
user_data['choices'] = choices
user_data['summary'] = response_summary.choices[0].message['content']
return jsonify(story=formatted_story, choices=choices, summary=user_data['summary'])
def format_story(story):
# Split the text into paragraphs using a regular expression
paragraphs = re.split(r"\n\s*\n", story)
#Insert <p> tags around each paragraph
formatted_story = "\n".join([f"<p>{paragraph}</p>" for paragraph in paragraphs])
return formatted_story
if __name__ == '__main__':
app.run(debug=True)
| [
"\n",
"PLACEHOLDER PLACEHOLDER",
"PLACEHOLDER",
"originalStory + \"\\n\" + prompts['next-page']['story']",
"next-page",
"originalStory + \"\\n\" + prompts['next-page']['summary']",
"originalStory + response_story.choices[0].message['content'] + \"\\n\" + prompts['next-page']['choices']",
"content",
"index"
] |
2024-01-10 | yamdereneko/ymbot | src~chatGPT~Chat_GPT_API.py | # -*- coding: utf-8 -*-
import asyncio
import nonebot
from pydantic import BaseModel
from httpx import AsyncClient
import src.Data.jx3_Redis as redis
import openai
class Response(BaseModel):
"""返回数据模型"""
id: str
"""状态码"""
object: str
created: int
model: str
choices: list
"""返回消息字符串"""
usage: dict | list[dict]
"""返回数据"""
class ChatGPTAPI:
client: AsyncClient
def __init__(self):
proxy_url = "http://username:password@127.0.0.1:8888"
proxies = {"http://": proxy_url, "https://": proxy_url}
self.client = AsyncClient(proxies=proxies)
self.url = "https://api.openai.com/v1/chat/completions"
async def call_api(self, content) -> Response:
red = redis.Redis()
chat_gpt_apikey = await red.query("chat_gpt_apikey")
Organization = await red.query("OpenAI-Organization")
"""请求api网站数据"""
headers = {
'Authorization': f'Bearer {chat_gpt_apikey}',
'OpenAI-Organization': Organization,
'Content-Type': 'application/json'
}
data = {
"model": "gpt-3.5-turbo",
"messages": [{"role": "user", "content": content}]
}
res = await self.client.post(url=self.url, json=data, headers=headers, timeout=3000)
print(res)
nonebot.logger.info(res.text)
return Response.parse_obj(res.json()) | [] |
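# Editor's note: illustrative usage only (an assumption, not part of the original module);
# it presumes the Redis keys read in call_api() are already populated.
#
#     async def _demo() -> None:
#         api = ChatGPTAPI()
#         resp = await api.call_api("Hello!")
#         nonebot.logger.info(resp.choices[0]["message"]["content"])
#
#     asyncio.run(_demo())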