{
  "architectures": [
    "LocalLLM4GenModule"
  ],
  "llm": "google/gemma-2-2b-it",
  "model_type": "localllm4gen",
  "template": "\nTask: \nGenerate textual response for conversational recommendation\n\nContext:\nYou are part of a Conversational Recommendation system. Your task is to generate a textual response with specific item recommendations into a cohesive and natural-sounding reply to the user.\nAnalyze the conversation history to understand the context and user's needs. Generate a response that is relevant and coherent to the conversational history.\n\nExample:\n- Input: \nHi, I love <e>Titanic</e> and <e>The_Godfather</e>! Could you recommend me some movies?\n- Output:\nGreat! You have excellent taste! If you enjoy those classics, you might also like <item> and <item>. Enjoy!\n\nInput:\n- Conversation history: \n<conversation>\n{history}\n</conversation>\n\nRequirement:\n- No code blocks, no chain-of-thoughts outputs or multiple responses. Keep response natural, concise and encouraging.\n- Complete the conversation with clean plain text response in one line. \n- If there are some concrete items in the response, they should be replaced with placeholders \"<item>\". \n\nOutput:\n",
  "torch_dtype": "bfloat16",
  "transformers_version": "4.44.0"
}