File size: 3,204 Bytes
9dd6856
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
import streamlit as st
import torch
import torch.nn as nn
from torchvision import transforms
from PIL import Image
from io import BytesIO
import requests

# CSS injected via st.markdown below to center-align elements in the page.
button_style = """
    <style>
    .center-align {
        display: flex;
        justify-content: center;
    }
    </style>
    """

# Fall back to CPU when no CUDA device is available so the app still runs
# on machines without a GPU (the original hard-coded 'cuda').
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

@st.cache_resource

class ConvAutoencoder(nn.Module):
    def __init__(self):
        super().__init__()
        # encoder 
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=4),
            nn.BatchNorm2d(32),
            nn.SELU()
            )
        self.conv2 = nn.Sequential(
            nn.Conv2d(32, 8, kernel_size=2),
            nn.BatchNorm2d(8),
            nn.SELU()
            )
        
        self.pool = nn.MaxPool2d(2, 2, return_indices=True, ceil_mode=True) #<<<<<< Bottleneck
        
        #decoder
        # Как работает Conv2dTranspose https://github.com/vdumoulin/conv_arithmetic

        self.unpool = nn.MaxUnpool2d(2, 2)
        
        self.conv1_t = nn.Sequential(
            nn.ConvTranspose2d(8, 32, kernel_size=2),
            nn.BatchNorm2d(32),
            nn.SELU()
            )
        self.conv2_t = nn.Sequential(
            nn.ConvTranspose2d(32, 1, kernel_size=4),
            nn.LazyBatchNorm2d(),
            nn.Sigmoid()
            )     

    def encode(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x, indicies = self.pool(x) # ⟸ bottleneck
        return x, indicies

    def decode(self, x, indicies):
        x = self.unpool(x, indicies)
        x = self.conv1_t(x)
        x = self.conv2_t(x)
        return x

    def forward(self, x):
        latent, indicies = self.encode(x)
        out = self.decode(latent, indicies)      
        return out
    
# Build the model and load the trained weights.
# - raw string: the original literal contained invalid escape sequences
#   (\B, \p, \s) that trigger DeprecationWarnings; the path value is unchanged.
# - map_location: lets a checkpoint saved on GPU load on a CPU-only host.
model = ConvAutoencoder().to(DEVICE)
model.load_state_dict(
    torch.load(r'D:\Bootcamp\phase_2\streamlit\autoend.pt', map_location=DEVICE)
)
model.eval()  # inference only: use batch-norm running statistics

transform = transforms.Compose([
    transforms.ToTensor(),  # convert the PIL image to a [0, 1] float tensor
    # Add further transforms here (e.g. normalization) if the training
    # pipeline used them.
])


image_source = st.radio("Choose the option of uploading the image of tumor:", ("File", "URL"))

if image_source == "File":
    uploaded_file = st.file_uploader("Upload the image", type=["jpg", "png", "jpeg"])
    if uploaded_file:
        image = Image.open(uploaded_file)
        
else:
    url = st.text_input("Enter the URL of image...")
    if url:
        response = requests.get(url)
        image = Image.open(BytesIO(response.content))


st.markdown(button_style, unsafe_allow_html=True)

# Run the autoencoder on the input image and display the reconstruction.
# Use the module-level DEVICE constant instead of hard-coding 'cuda', so the
# script also works on CPU-only machines.
model.to(DEVICE)

if 'image' in locals():
    st.image(image, caption="Uploaded image", use_column_width=True)

    # conv1 expects a single input channel, so convert to grayscale first.
    bw_image = image.convert('L')

    # Add a batch dimension: (1, 1, H, W), and move to the model's device.
    image_tensor = transform(bw_image).unsqueeze(0).to(DEVICE)

    with torch.no_grad():  # inference only — no autograd bookkeeping needed
        output = model(image_tensor)

    # Drop the batch dimension and move to CPU before converting for display.
    output = transforms.ToPILImage()(output[0].cpu())

    if st.button("Detect tumor", type="primary"):
        st.image(output, caption="Annotated Image", use_column_width=True)