import streamlit as st
import torch
import torch.nn as nn
from torchvision import transforms
from PIL import Image
from io import BytesIO
import requests

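# CSS helper injected later via st.markdown (defines a flexbox centering class)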
button_style = """
<style>
.center-align {
    display: flex;
    justify-content: center;
}
</style>
"""

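# Use the GPU when it is available, otherwise fall back to CPU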
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'


class ConvAutoencoder(nn.Module):
    def __init__(self):
        super().__init__()

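        # Encoder: two convolutional blocks (1 -> 32 -> 8 channels) followed by max pooling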
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=4),
            nn.BatchNorm2d(32),
            nn.SELU()
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(32, 8, kernel_size=2),
            nn.BatchNorm2d(8),
            nn.SELU()
        )

        self.pool = nn.MaxPool2d(2, 2, return_indices=True, ceil_mode=True)

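        # Decoder: unpool using the stored indices, then mirror the encoder with transposed convolutions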
        self.unpool = nn.MaxUnpool2d(2, 2)

        self.conv1_t = nn.Sequential(
            nn.ConvTranspose2d(8, 32, kernel_size=2),
            nn.BatchNorm2d(32),
            nn.SELU()
        )
        self.conv2_t = nn.Sequential(
            nn.ConvTranspose2d(32, 1, kernel_size=4),
            nn.LazyBatchNorm2d(),
            nn.Sigmoid()
        )

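    # The pooling indices from encode() are returned so decode() can unpool at the exact positions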
    def encode(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x, indices = self.pool(x)
        return x, indices

    def decode(self, x, indices):
        x = self.unpool(x, indices)
        x = self.conv1_t(x)
        x = self.conv2_t(x)
        return x

    def forward(self, x):
        latent, indices = self.encode(x)
        out = self.decode(latent, indices)
        return out

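# Build the model and load the trained weights once per session;
# st.cache_resource keeps the loaded model across Streamlit reruns.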
@st.cache_resource
def load_model():
    model = ConvAutoencoder().to(DEVICE)
    state_dict = torch.load(r'D:\Bootcamp\phase_2\streamlit\autoend.pt', map_location=DEVICE)
    model.load_state_dict(state_dict)
    model.eval()
    return model


model = load_model()

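# Preprocessing: convert the PIL image to a float tensor scaled to [0, 1]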
transform = transforms.Compose([
    transforms.ToTensor(),
])

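# Let the user provide the scan either as an uploaded file or via a URL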
image_source = st.radio("Choose how to upload the tumor image:", ("File", "URL"))

if image_source == "File":
    uploaded_file = st.file_uploader("Upload the image", type=["jpg", "png", "jpeg"])
    if uploaded_file:
        image = Image.open(uploaded_file)
else:
    url = st.text_input("Enter the image URL...")
    if url:
        response = requests.get(url)
        image = Image.open(BytesIO(response.content))

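# Inject the CSS helper defined at the top of the script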
st.markdown(button_style, unsafe_allow_html=True)

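# Run preprocessing and inference only once an image has been provided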
if 'image' in locals():
    st.image(image, caption="Uploaded image", use_column_width=True)

    # The model expects a single-channel input, so convert to grayscale first
    bw_image = image.convert('L')

    image_tensor = transform(bw_image).unsqueeze(0)
    image_tensor = image_tensor.to(DEVICE)

    # Reconstruct the image without tracking gradients
    with torch.no_grad():
        output = model(image_tensor)

    output = transforms.ToPILImage()(output[0].cpu())

    if st.button("Detect tumor", type="primary"):
        st.image(output, caption="Annotated Image", use_column_width=True)
|