"""
app_utils.py

Description: This file contains utility functions to support the Streamlit app. 
These functions handle file processing, video conversion, and inference running 
on uploaded images and videos.

Author: Austin Powell
"""

import io
import logging
import os
import tempfile

import altair as alt
import av
import cv2 as cv
import numpy as np
import pandas as pd
import streamlit as st
from PIL import Image
from tqdm import tqdm

import inference  # local module providing inference.main(), used below

def extract_file_datetime(fname):
    """Extract datetime from file name
    
    Args:
        fname (str): File name

    Returns:
        pd.Timestamp: Datetime extracted from the file name
    """
    fname = os.path.basename(fname)
    dt = fname.split("_")[1]
    h, m, s = fname.split("_")[2].split(".")[0].split("-")
    return pd.to_datetime(f"{dt} {h}:{m}:{s}")
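
# Usage sketch (hedged): the parsing above assumes file names of the form
# "<prefix>_<YYYY-MM-DD>_<HH-MM-SS>.<ext>"; the file name below is purely illustrative.
#   extract_file_datetime("camera1_2023-06-01_14-30-05.mp4")
#   -> Timestamp("2023-06-01 14:30:05")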

def frames_to_video(frames, fps=12):
    """Convert a sequence of frames to an in-memory MP4 video for Streamlit.

    Args:
        frames: Frames from cv2.VideoCapture as numpy arrays, e.g. frame.astype(np.uint8)
        fps: Frames per second. Useful if the inference video is compressed, to slow playback for analysis

    Returns:
        io.BytesIO: In-memory MP4 file with the read pointer reset to the start
    """

    # Grab information from the first frame
    height, width, layers = frames[0].shape

    # Create a BytesIO "in memory file"
    output_memory_file = io.BytesIO()

    # Open "in memory file" as MP4 video output
    output = av.open(output_memory_file, "w", format="mp4")

    # Add an H.264 video stream to the MP4 container, with framerate = fps
    stream = output.add_stream("h264", rate=fps)

    # Set frame width and height
    stream.width = width
    stream.height = height

    # Set pixel format (yuv420p for better compatibility)
    stream.pix_fmt = "yuv420p"

    # Select low crf for high quality (the price is larger file size)
    stream.options = {
        "crf": "17"
    }

    # Iterate through the frames, encode, and write to MP4 memory file
    logging.info("INFO: Encoding frames and writing to MP4 format.")
    for frame in tqdm(frames):
        # Convert frame to av.VideoFrame format
        frame = av.VideoFrame.from_ndarray(frame.astype(np.uint8), format="bgr24")

        # Encode the video frame
        packet = stream.encode(frame)

        # "Mux" the encoded frame (add the encoded frame to MP4 file)
        output.mux(packet)

    # Flush the encoder
    packet = stream.encode(None)
    output.mux(packet)

    # Close the output video file
    output.close()

    # Reset the file pointer to the beginning of the memory file
    output_memory_file.seek(0)

    # Return the output memory file
    return output_memory_file
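
# Usage sketch (hedged): `processed_frames` below is a hypothetical list of BGR
# uint8 numpy arrays, e.g. frames read from cv2.VideoCapture and annotated by the
# inference step:
#   video_bytes = frames_to_video(frames=processed_frames, fps=12)
#   st.video(video_bytes)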

def process_uploaded_file():
    """Handle an uploaded image or video: display it, run inference, and show the results."""
    st.subheader("Upload your own video...")

    # Initialize accepted file types for upload
    img_types = ["jpg", "png", "jpeg"]
    video_types = ["mp4", "avi"]

    # Allow user to upload an image or video file
    uploaded_file = st.file_uploader("Select an image or video file...", type=img_types + video_types)

    # Display the uploaded file
    if uploaded_file is not None:
        if str(uploaded_file.type).split("/")[-1] in img_types:
            # Display uploaded image
            image = Image.open(uploaded_file)
            st.image(image, caption="Uploaded image", use_column_width=True)

            # TBD: Inference code to run and display for single image

        elif str(uploaded_file.type).split("/")[-1] in video_types:
            # Display uploaded video
            st.video(uploaded_file)

            # Convert streamlit video object to OpenCV format to run inferences
            tfile = tempfile.NamedTemporaryFile(delete=False)
            tfile.write(uploaded_file.read())
            vf = cv.VideoCapture(tfile.name)

            # Run inference on the uploaded video
            with st.spinner("Running inference..."):
                frames, counts, timestamps = inference.main(vf)
            logging.info("INFO: Completed running inference on frames")
            st.balloons()

            # Convert OpenCV Numpy frames in-memory to IO Bytes for streamlit
            streamlit_video_file = frames_to_video(frames=frames, fps=11)

            # Show processed video and provide download button
            st.video(streamlit_video_file)
            st.download_button(
                label="Download processed video",
                data=streamlit_video_file,
                mime="mp4",
                file_name="processed_video.mp4",
            )

            # Create dataframe for fish counts and timestamps
            df_counts_time = pd.DataFrame(
                data={"fish_count": counts, "timestamps": timestamps[1:]}
            )

            # Display fish count vs. timestamp chart
            st.altair_chart(
                plot_count_date(dataframe=df_counts_time),
                use_container_width=True,
            )

    else:
        st.write("No file uploaded")