File size: 6,535 Bytes
7358a9f
 
 
 
 
 
 
 
 
 
 
 
 
 
6490202
38fd6a3
7358a9f
 
 
 
 
 
38fd6a3
7358a9f
3e1ee6c
979b378
7358a9f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
979b378
 
7358a9f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
979b378
7358a9f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
# -*- coding: utf-8 -*-
"""chatbot.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1zgihAeNpcDd0opNPsbnmS2UNgz0rsfOB
"""

from langchain.text_splitter import CharacterTextSplitter
from langchain_community.document_loaders import PyPDFLoader

import logging
import sys
from pathlib import Path
import os

# Route INFO-level logs to stdout so llama-index progress messages show up
# in the notebook output.
# BUG FIX: basicConfig() already installs a stdout StreamHandler on the root
# logger; the original code then added a second one via addHandler(), which
# caused every log record to be printed twice. The duplicate is removed.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)

from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index.llms import HuggingFaceLLM
from langchain_community.document_loaders import PyPDFLoader

# Load every file in the "Dat" directory as llama-index Document objects.
# BUG FIX: the original passed each Document straight into
# CharacterTextSplitter.split_text(), which expects a plain string (it would
# raise on a Document), and whose list-of-lists-of-str result is not what
# VectorStoreIndex.from_documents() accepts. Chunking is already handled
# downstream by ServiceContext.from_defaults(chunk_size=512), so the loaded
# Documents are kept as-is here.
documents = SimpleDirectoryReader("Dat").load_data()

from llama_index.prompts.prompts import SimpleInputPrompt


# System prompt injected ahead of every query by the HuggingFaceLLM defined
# below; it frames the model as an empathetic mental-health support assistant
# and enumerates behavioural guidelines, goals, example prompts, and safety
# caveats (including deferring to licensed professionals).
system_prompt =  """Emphasize empathy and active listening: Create a safe space for users to share their thoughts and feelings without judgment.
Focus on understanding and validation: Reflect back user emotions and experiences to demonstrate understanding and build trust.
Offer evidence-based support: Provide grounding techniques, coping strategies, and psychoeducation based on sound mental health principles.
Personalize responses: Tailor interactions to individual needs, preferences, and goals.
Maintain ethical boundaries: Respect user privacy, confidentiality, and autonomy.
Recognize limitations: Acknowledge that the chatbot is not a replacement for professional therapy and encourage seeking licensed help when needed.
Key goals:

Reduce symptoms of anxiety, depression, and stress.
Improve emotional regulation and coping skills.
Enhance self-awareness and self-compassion.
Promote healthy relationships and communication.
Build resilience and problem-solving skills.
Encourage positive self-care and lifestyle choices.
Specific prompts:

"Greet the user warmly and introduce yourself as their AI therapist."
"Ask open-ended questions to elicit user thoughts, feelings, and concerns."
"Respond empathetically to user disclosures, validating their experiences."
"Offer appropriate mental health resources, exercises, or techniques based on user needs."
"Guide users through mindfulness exercises or relaxation techniques."
"Challenge negative thinking patterns and encourage cognitive reframing."
"Help users set realistic goals and track progress towards mental wellness."
"Provide psychoeducation on various mental health topics and treatment options."
"Conclude sessions with positive affirmations and encouragement."
"Remind users of the chatbot's limitations and the importance of seeking professional help."
"Always prioritize user safety and offer crisis resources in case of urgent needs."
Additional considerations:

Tailor prompts to specific mental health conditions or challenges.
Incorporate humor or lightheartedness when appropriate to build rapport.
Provide options for different communication styles (e.g., text, voice, interactive activities).
Continuously monitor and refine prompts based on user feedback and clinical expertise."""



# This will wrap the default prompts that are internal to llama-index
query_wrapper_prompt = SimpleInputPrompt("<|USER|>{query_str}<|ASSISTANT|>")

import torch

llm = HuggingFaceLLM(
    context_window=2048,
    max_new_tokens=128,
    generate_kwargs={"temperature": 0.5, "do_sample": False},
    system_prompt=system_prompt,
    query_wrapper_prompt=query_wrapper_prompt,
    tokenizer_name="NousResearch/Llama-2-7b-chat-hf",
    model_name="NousResearch/Llama-2-7b-chat-hf",
    device_map="auto",
    # uncomment this if using CUDA to reduce memory usage
    model_kwargs={"torch_dtype": torch.float16 })

from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.embeddings.langchain import LangchainEmbedding
from llama_index import ServiceContext

# Sentence-transformer embedding model, wrapped so llama-index can use the
# langchain embedding class.
embed_model = LangchainEmbedding(
  HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
)

# Bundle LLM + embedding model; documents are chunked to 512 tokens when
# they are indexed below.
service_context = ServiceContext.from_defaults(
    chunk_size=512,
    llm=llm,
    embed_model=embed_model
)

# Build the in-memory vector index over the loaded documents and expose a
# query engine (retrieval followed by LLM answer synthesis).
index = VectorStoreIndex.from_documents(documents, service_context=service_context)

query_engine = index.as_query_engine()

import gradio as gr

def respond_to_user(input_text):
    """Run the user's text through the RAG query engine and return its answer."""
    return query_engine.query(input_text)


import gradio as gr

def render_messages():
    """Render the recorded chat history as a Bootstrap-style HTML transcript.

    Reads ``iface._message_history`` — a list of ``{"role", "content"}``
    dicts — if it exists, and returns an HTML string with one card per
    message, in insertion order.

    BUG FIX: the original returned a raw template string that mixed Mako
    (``%for``) and Jinja (``{%if%}``) syntax and was never passed through any
    template engine, so the placeholders were emitted literally; it also
    wrapped the result in ``gr.outputs.HTML``, which no longer exists in
    modern gradio, and its ``[-3::-1]`` slice silently dropped the two most
    recent messages. The HTML is now built directly, with message content
    escaped to prevent markup injection.
    """
    from html import escape  # local import keeps this fix self-contained

    cards = []
    for msg in getattr(iface, "_message_history", []):
        if msg["role"] == "assistant":
            header, css_class = "Assistant", "card bg-light mb-3"
        else:
            header, css_class = "User", "card"
        cards.append(
            f"<div class='{css_class}' style='max-width: 50rem;'>"
            f"<div class='card-header'>{header}</div>"
            f"<div class='card-body'>{escape(msg['content'])}</div>"
            f"</div>"
        )
    return "<div class='card'>" + "".join(cards) + "</div>"

def process_inputs(input_text):
    """Gradio callback: record the exchange, return (cleared text, transcript).

    BUG FIX: the original assumed ``iface._message_history`` already existed
    — it is never initialised anywhere, so the very first call raised
    ``AttributeError`` — and it never recorded the user's own message, so the
    transcript could only ever contain assistant turns.
    """
    if not hasattr(iface, "_message_history"):
        iface._message_history = []  # lazily create the history store
    iface._message_history.append({"role": "user", "content": input_text})
    result = respond_to_user(input_text)
    iface._message_history.append({"role": "assistant", "content": result})
    # First value clears the input box; second is the rendered chat HTML.
    return "", render_messages()

# Gradio UI wiring. BUG FIXES vs. the original:
#  * ``outputs`` was ``[]`` even though ``process_inputs`` returns two values
#    (the cleared textbox text and the chat HTML), which gradio rejects at
#    call time — the output components now match that signature;
#  * ``Interface.render()`` only works inside a ``gr.Blocks`` context;
#    ``launch()`` is what actually starts the app for a standalone script.
iface = gr.Interface(fn=process_inputs,
                     inputs=gr.components.Textbox(lines=3, placeholder="Type something..."),
                     outputs=[gr.components.Textbox(visible=False),
                              gr.components.HTML(label="Conversation")],
                     title="AI Therapist Chatbot",
                     allow_flagging="never",
                     theme="monochrome",
                     css="""
                       .gradio-container > div:first-child{
                           margin-top: 0 !important;
                       }
                       .gradio-label-container label{
                           font-weight: bold;
                           color: black;
                       }
                   """,
                     article="""
                       Welcome to our AI Therapist chatbot! Feel free to ask anything related to mental health and receive guidance. Confidentiality and privacy notice: Your conversation remains private; we do not store any data.
                     """)

iface.launch()