mlara committed
Commit 7fb0689
1 Parent(s): 7f4c27a

third commit

Files changed (1):
  earnings_app.py (+1 -20)
earnings_app.py CHANGED
@@ -29,7 +29,6 @@ import llama_index
  from llama_index.embeddings import OpenAIEmbedding
  from llama_index import ServiceContext
  from llama_index.llms import OpenAI
- from llama_index.ingestion import IngestionPipeline
  from llama_index.node_parser import TokenTextSplitter

  set_global_handler("wandb", run_args={"project": "final-project-v1"})
@@ -106,10 +105,6 @@ text_splitter = TokenTextSplitter(
      chunk_size=chunk_size
  )

- node_parser_pipeline = IngestionPipeline(
-     transformations=[text_splitter]
- )
-
  storage_context = wandb_callback.load_storage_context(
      artifact_url="llmop/final-project-v1/earnings-index:v0"
  )
@@ -138,20 +133,6 @@ def auto_retrieve_fn(
      response = query_engine.query(query)
      return str(response)

- # App
-
- # Pydantic is an easy way to define a schema
- class AutoRetrieveModel(BaseModel):
-     query: str = Field(..., description="natural language query string")
-     filter_key_list: List[str] = Field(
-         ..., description="List of metadata filter field names"
-     )
-     filter_value_list: List[str] = Field(
-         ...,
-         description=(
-             "List of metadata filter field values (corresponding to names specified in filter_key_list)"
-         )
-     )

  # Main function to extract information
  def extract_information():
@@ -183,4 +164,4 @@ def extract_information():
      # res = await extract_information_async(text)
      # print(res)

- asyncio.run(main())
+ # asyncio.run(main())
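The first two hunks drop the IngestionPipeline import and the node_parser_pipeline wrapper while keeping the TokenTextSplitter itself. As a rough sketch of what the splitter does on its own, assuming the same legacy llama_index package layout the file already imports from (the Document text and chunk_size value below are placeholders, not taken from the app):

# Sketch: chunk documents with the TokenTextSplitter directly; the removed
# IngestionPipeline was only wrapping this single transformation.
from llama_index import Document
from llama_index.node_parser import TokenTextSplitter

text_splitter = TokenTextSplitter(chunk_size=512)  # placeholder chunk size

docs = [Document(text="Example earnings-call transcript text ...")]
nodes = text_splitter.get_nodes_from_documents(docs)
print(f"produced {len(nodes)} nodes")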
 
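The last hunk comments out the module-level asyncio.run(main()) call rather than deleting it. If the goal is to keep the script runnable without executing on import, a guarded entry point is a common alternative; a minimal sketch with a stand-in main(), since the app's real async entry point is defined elsewhere in earnings_app.py:

import asyncio

async def main() -> None:
    # Stand-in for the app's real async entry point (assumption; the actual
    # body lives elsewhere in earnings_app.py).
    print("running earnings_app entry point")

if __name__ == "__main__":
    # Runs only when the module is executed directly, not when it is imported.
    asyncio.run(main())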