import { d as private_env } from './shared-server-49TKSBDM.js';
import { c as redirect, b as base } from './index-JNnR1J8_.js';
import { L as LlamaCppService } from './LlamaCppService-Bqc2roDm.js';
import 'fs';
import 'path';

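// POST: reads { userprompt } from the JSON body and streams the model's generated tokens back to the client.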
const POST = async ({ locals, request }) => {
  const body = await request.json();
  // Debug log of the configured LLM endpoint (LLM_API_URL).
  console.log(private_env.LLM_API_URL);
  // Lets the stream's cancel() hook abort the in-flight prediction.
  const abortController = new AbortController();
  const llmService = new LlamaCppService(private_env.LLM_API_URL);
  const llmGenerator = await llmService.predict(body.userprompt, { abortController });
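  // Bridge the async token generator into a web ReadableStream so tokens reach the client as they are produced.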
  const stream = new ReadableStream({
    async start(controller) {
      const encoder = new TextEncoder();
      try {
        // Forward each generated token to the client as UTF-8 bytes.
        for await (const output of await llmGenerator({ prompt: body.userprompt })) {
          controller.enqueue(encoder.encode(output.token.text));
        }
      } catch (error) {
        if (error.name === "AbortError") {
          console.log("Request was aborted during LLMServer prediction.");
        } else {
          console.error("Error during LLMServer prediction:", error);
        }
      }
      try {
        controller.close();
      } catch {
        // The stream may already be closed if the client canceled it mid-generation.
      }
    },
    cancel() {
      // The client disconnected: abort the in-flight prediction so generation stops.
      console.log("ReadableStream canceled and aborted");
      abortController.abort();
    }
  });
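  // Return the token stream. Note: the content type is text/event-stream, but the chunks are raw token text rather than "data:"-framed SSE events.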
  return new Response(stream, {
    headers: {
      "content-type": "text/event-stream"
    }
  });
};

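// GET: redirect plain GET requests back to the app root.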
const GET = async () => {
  throw redirect(302, `${base}/`);
};

export { GET, POST };