import io
import os
import time

import openai
import streamlit as st
from pydub import AudioSegment

# SECURITY FIX: the original file shipped a plain-text API key in source
# (rotate that key immediately) and, on top of that, assigned it to an
# unused local variable — it was never applied to the openai module, so
# every API call would have failed with an auth error. Read the key from
# the environment and actually install it.
openai.api_key = os.environ.get("OPENAI_API_KEY", "")


def medical_summary(transcript):
    """Organize a raw medical transcript into the clinic's section layout.

    Args:
        transcript: Free-form transcript text (str).

    Returns:
        str: The model's structured summary, or an ``"An error occurred: …"``
        message if the API call raises — callers display whichever comes back.
    """
    prompt = f"""
    Organize the following medical transcript into the predefined sections:

    Sections:
    1. Medical Specialty
    2. CHIEF COMPLAINT
    3. Purpose of visit
    4. HISTORY and Physical
       - PAST MEDICAL HISTORY
       - PAST SURGICAL HISTORY
       - ALLERGIES History
       - Social History
       - REVIEW OF SYSTEMS
    5. PHYSICAL EXAMINATION
       - GENERAL
       - Vitals
       - ENT
       - Head
       - Neck
       - Chest
       - Heart
       - Abdomen
       - Pelvic
       - Extremities

    Transcript: {transcript}

    Provide a structured summary in the above format.
    """
    try:
        response = openai.ChatCompletion.create(
            model="gpt-4",  # Use gpt-3.5-turbo if GPT-4 is unavailable
            messages=[
                {"role": "system", "content": "You are a helpful medical assistant."},
                {"role": "user", "content": prompt},
            ],
            temperature=0.5,
            max_tokens=3000,  # Adjust max_tokens based on the expected size of the summary
        )
        structured_summary = response['choices'][0]['message']['content'].strip()
        return structured_summary
    except Exception as e:
        # Best-effort: surface the failure as a displayable string rather
        # than crashing the Streamlit page.
        return f"An error occurred: {e}"


def transcribe_audio(audio_file):
    """Normalize an uploaded audio file and return its transcript.

    NOTE(review): this is currently a STUB — it normalizes the audio
    (mono, 16 kHz WAV) but never calls a transcription service; it always
    returns the same hard-coded sample sentence. Wire in Whisper (see the
    commented call below) before production use.

    Args:
        audio_file: A file-like object accepted by ``AudioSegment.from_file``.

    Returns:
        str: The (placeholder) transcript text.
    """
    audio = AudioSegment.from_file(audio_file)
    # Mono + 16 kHz is the canonical input format for speech models.
    audio = audio.set_channels(1).set_frame_rate(16000)
    buffer = io.BytesIO()
    audio.export(buffer, format="wav")
    buffer.seek(0)
    # Here, you would normally call the transcription API, e.g.:
    # response = openai.Audio.transcribe(model="whisper-1", file=buffer)
    return "Patient complains of chest pain, shortness of breath, and dizziness. He has a history of hypertension."
# Streamlit UI
def main():
    """Streamlit entry point: accept an audio upload or pasted transcript,
    then render a structured medical summary produced by ``medical_summary``.
    """
    # Custom CSS placeholder to improve UI aesthetics (currently empty).
    st.markdown("""
    """, unsafe_allow_html=True)

    # Title and subtitle. NOTE(review): the original HTML tags were lost
    # in extraction; minimal tags reconstructed here — confirm against the
    # intended styling.
    st.markdown('<h1>Medical Transcription Dashboard</h1>', unsafe_allow_html=True)
    st.markdown(
        '<p>Upload an audio file or enter the transcript to generate a medical summary</p>',
        unsafe_allow_html=True,
    )

    # Layout: input area in a card.
    with st.container():
        st.markdown('<div class="card">', unsafe_allow_html=True)

        # Audio upload and text input options.
        audio_file = st.file_uploader(
            "Upload an audio file (wav, mp3, etc.)",
            type=["wav", "mp3"],
            label_visibility="collapsed",
        )
        transcript_text = st.text_area("Or, enter the transcript manually:", height=200)

        # Show "Generate Medical Summary" button.
        if st.button("Generate Medical Summary", key="generate", use_container_width=True):
            # BUG FIX: the original never bound `transcript` in the
            # "no input" branch, so the `if transcript:` check below
            # raised NameError. Initialize it before branching.
            transcript = None
            if audio_file:
                # Show loading spinner while processing.
                with st.spinner('Transcribing audio...'):
                    transcript = transcribe_audio(audio_file)
                    time.sleep(2)  # Simulate delay
                st.write(f"Transcript: {transcript}")
            elif transcript_text:
                transcript = transcript_text
            else:
                st.markdown(
                    '<p>Please upload an audio file or enter a transcript.</p>',
                    unsafe_allow_html=True,
                )

            if transcript:
                # Show loading spinner while generating summary.
                with st.spinner('Generating Medical Summary...'):
                    time.sleep(2)  # Simulate processing time
                    summary = medical_summary(transcript)
                st.markdown("### Medical Summary:")
                st.write(summary)

        st.markdown('</div>', unsafe_allow_html=True)


if __name__ == "__main__":
    main()