jackboyla committed on
Commit 9dcc529
1 Parent(s): fa019ed

Updates readme

Files changed (1)
  1. README.md +105 -99
README.md CHANGED
@@ -22,12 +22,14 @@ tags:

  ## How to Get Started with the Model (Sample inference code)

- This code snippets show how to get quickly started with running the model on a GPU:
+ This code snippet shows how to quickly get started running the model on a CPU with ollama:

- ```python
+ ```bash
  # install ollama
- # run:
- # `ollama pull hf.co/jackboyla/Phi-3-mini-4k-instruct-graph-GGUF:Q8_0`
+ ollama pull hf.co/jackboyla/Phi-3-mini-4k-instruct-graph-GGUF:Q8_0
+ ```
+
+ ```python
  import requests
  import json

@@ -82,101 +84,105 @@ response = requests.post(url, json=payload)

  # Print the response
  print(response.status_code)
- print(response.json())
-
- # Output:
-
- # {
- #   "nodes": [
- #     {
- #       "id": "OpenAI",
- #       "type": "organization",
- #       "detailed_type": "ai research organization"
- #     },
- #     {
- #       "id": "GPT family",
- #       "type": "technology",
- #       "detailed_type": "large language models"
- #     },
- #     {
- #       "id": "DALL-E series",
- #       "type": "technology",
- #       "detailed_type": "text-to-image models"
- #     },
- #     {
- #       "id": "Sora",
- #       "type": "technology",
- #       "detailed_type": "text-to-video model"
- #     },
- #     {
- #       "id": "ChatGPT",
- #       "type": "technology",
- #       "detailed_type": "generative ai"
- #     },
- #     {
- #       "id": "San Francisco",
- #       "type": "location",
- #       "detailed_type": "city"
- #     },
- #     {
- #       "id": "California",
- #       "type": "location",
- #       "detailed_type": "state"
- #     },
- #     {
- #       "id": "December 2015",
- #       "type": "date",
- #       "detailed_type": "foundation date"
- #     },
- #     {
- #       "id": "November 2022",
- #       "type": "date",
- #       "detailed_type": "release date"
- #     }
- #   ],
- #   "edges": [
- #     {
- #       "from": "OpenAI",
- #       "to": "San Francisco",
- #       "label": "headquartered in"
- #     },
- #     {
- #       "from": "San Francisco",
- #       "to": "California",
- #       "label": "located in"
- #     },
- #     {
- #       "from": "OpenAI",
- #       "to": "December 2015",
- #       "label": "founded in"
- #     },
- #     {
- #       "from": "OpenAI",
- #       "to": "GPT family",
- #       "label": "developed"
- #     },
- #     {
- #       "from": "OpenAI",
- #       "to": "DALL-E series",
- #       "label": "developed"
- #     },
- #     {
- #       "from": "OpenAI",
- #       "to": "Sora",
- #       "label": "developed"
- #     },
- #     {
- #       "from": "OpenAI",
- #       "to": "ChatGPT",
- #       "label": "released"
- #     },
- #     {
- #       "from": "ChatGPT",
- #       "to": "November 2022",
- #       "label": "released in"
- #     }
- #   ]
- # }
+ out = json.loads(response.content.decode('utf-8'))['message']['content']
+ print(json.dumps(json.loads(out), indent=2))
+
+ ```
+
+ Output:
+
+ ```json
+ {
+   "nodes": [
+     {
+       "id": "OpenAI",
+       "type": "organization",
+       "detailed_type": "ai research organization"
+     },
+     {
+       "id": "GPT family",
+       "type": "technology",
+       "detailed_type": "large language models"
+     },
+     {
+       "id": "DALL-E series",
+       "type": "technology",
+       "detailed_type": "text-to-image models"
+     },
+     {
+       "id": "Sora",
+       "type": "technology",
+       "detailed_type": "text-to-video model"
+     },
+     {
+       "id": "ChatGPT",
+       "type": "technology",
+       "detailed_type": "generative ai"
+     },
+     {
+       "id": "San Francisco",
+       "type": "location",
+       "detailed_type": "city"
+     },
+     {
+       "id": "California",
+       "type": "location",
+       "detailed_type": "state"
+     },
+     {
+       "id": "December 2015",
+       "type": "date",
+       "detailed_type": "foundation date"
+     },
+     {
+       "id": "November 2022",
+       "type": "date",
+       "detailed_type": "release date"
+     }
+   ],
+   "edges": [
+     {
+       "from": "OpenAI",
+       "to": "San Francisco",
+       "label": "headquartered in"
+     },
+     {
+       "from": "San Francisco",
+       "to": "California",
+       "label": "located in"
+     },
+     {
+       "from": "OpenAI",
+       "to": "December 2015",
+       "label": "founded in"
+     },
+     {
+       "from": "OpenAI",
+       "to": "GPT family",
+       "label": "developed"
+     },
+     {
+       "from": "OpenAI",
+       "to": "DALL-E series",
+       "label": "developed"
+     },
+     {
+       "from": "OpenAI",
+       "to": "Sora",
+       "label": "developed"
+     },
+     {
+       "from": "OpenAI",
+       "to": "ChatGPT",
+       "label": "released"
+     },
+     {
+       "from": "ChatGPT",
+       "to": "November 2022",
+       "label": "released in"
+     }
+   ]
+ }
  ```

  ### About GGUF
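
Note: the hunks above only show the changed fragments of the inference snippet; the middle of the Python example, where `url` and `payload` are built before `response = requests.post(url, json=payload)`, is elided by the diff context. As a rough, self-contained sketch of what the full call could look like, assuming ollama is serving locally on its default port and its standard `/api/chat` endpoint; the model tag matches the `ollama pull` command above, while the prompt text is a made-up example rather than the README's actual prompt:

```python
# Minimal sketch, not the README's exact code: the endpoint, options and prompt
# text below are assumptions filled in around the fragments shown in the diff.
import requests
import json

url = "http://localhost:11434/api/chat"  # ollama's default local chat endpoint

payload = {
    # Model tag as pulled above with `ollama pull`.
    "model": "hf.co/jackboyla/Phi-3-mini-4k-instruct-graph-GGUF:Q8_0",
    "stream": False,  # ask for one JSON response instead of a token stream
    "messages": [
        {
            "role": "user",
            # Hypothetical input text (the README's real prompt is not in the diff);
            # the model is expected to return an entity graph as JSON.
            "content": (
                "OpenAI, headquartered in San Francisco, California, was founded "
                "in December 2015. It developed the GPT family of large language "
                "models, the DALL-E series and Sora, and released ChatGPT in "
                "November 2022."
            ),
        }
    ],
}

response = requests.post(url, json=payload)

# Print the response status and the extracted graph, as in the README snippet
print(response.status_code)
out = json.loads(response.content.decode('utf-8'))['message']['content']
print(json.dumps(json.loads(out), indent=2))
```

With `"stream": False`, ollama returns a single JSON object whose `message.content` field carries the model's text output, which is why the snippet decodes the response body and then calls `json.loads` on that field a second time.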
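
The parsed output is a flat `nodes`/`edges` document describing an entity graph. Purely as an illustration (networkx is an assumption here, not something the model card references), the same structure can be loaded into a directed graph for downstream use:

```python
# Illustration only: networkx is not mentioned in the README; plain dicts
# would work just as well for the same nodes/edges structure.
import json
import networkx as nx

graph = json.loads(out)  # `out` is the decoded message content from the request above

G = nx.DiGraph()
for node in graph["nodes"]:
    # Keep the entity-type annotations as node attributes.
    G.add_node(node["id"], type=node["type"], detailed_type=node["detailed_type"])
for edge in graph["edges"]:
    G.add_edge(edge["from"], edge["to"], label=edge["label"])

print(G.number_of_nodes(), G.number_of_edges())  # 9 nodes, 8 edges for the sample output
print(G.edges["OpenAI", "ChatGPT"]["label"])     # "released"
```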