jadechoghari
committed on
Update modeling.py
modeling.py +11 -0
modeling.py CHANGED
@@ -8,6 +8,7 @@ import torch
 from .dino_wrapper2 import DinoWrapper
 from .transformer import TriplaneTransformer
 from .synthesizer_part import TriplaneSynthesizer
+from .processor import LRMImageProcessor
 
 class CameraEmbedder(nn.Module):
     def __init__(self, raw_dim: int, embed_dim: int):
@@ -21,6 +22,16 @@ class CameraEmbedder(nn.Module):
     def forward(self, x):
         return self.mlp(x)
 
+# the processor
+class LRMProcessor:
+    def __init__(self, source_size=512, *args, **kwargs):
+        self.image_processor = LRMImageProcessor(source_size, *args, **kwargs)
+
+    def process(self, image, batch_size=1):
+        # preprocess the image
+        processed_image, source_camera = self.image_processor(image)
+        return processed_image, source_camera
+
 class LRMGeneratorConfig(PretrainedConfig):
     model_type = "lrm_generator"
 
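For context on this change: the new LRMProcessor is a thin wrapper around LRMImageProcessor, so a single process() call returns both the preprocessed image and its source camera. A minimal usage sketch follows; the import path and the accepted input type are assumptions, not something this diff confirms.

# Hypothetical usage sketch, not part of the commit.
# Assumes the repo is importable as a package and that LRMImageProcessor
# accepts an image path (or PIL image) as input.
from modeling import LRMProcessor  # adjust to the actual package layout

processor = LRMProcessor(source_size=512)
processed_image, source_camera = processor.process("example_input.png")
# Both values come straight from LRMImageProcessor; process() adds no extra logic.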