nikigoli committed on
Commit c9af1ba
1 Parent(s): 9e759f0

Removed print statements

models/GroundingDINO/groundingdino.py CHANGED
@@ -397,7 +397,6 @@ class GroundingDINO(nn.Module):
             dictionnaries containing the two above keys for each decoder layer.
         """
 
-        print("inside forward")
         if targets is None:
             captions = kw["captions"]
         else:
@@ -409,7 +408,6 @@ class GroundingDINO(nn.Module):
             samples.device
         )
 
-        print("tokenized text")
         one_hot_token = tokenized
 
         (
@@ -445,7 +443,6 @@ class GroundingDINO(nn.Module):
 
         bert_output = self.bert(**tokenized_for_encoder)  # bs, 195, 768
 
-        print("got bert output")
         encoded_text = self.feat_map(
             bert_output["last_hidden_state"]
         )  # bs, 195, d_model
@@ -494,7 +491,6 @@ class GroundingDINO(nn.Module):
             else:
                 exemplar_tokens = None
 
-            print("got visual exemplar tokens")
 
         else:
             features, poss = self.backbone(samples)
@@ -576,7 +572,6 @@ class GroundingDINO(nn.Module):
         srcs = []
         masks = []
         for l, feat in enumerate(features):
-            print("l: " + str(l))
             src, mask = feat.decompose()
             srcs.append(self.input_proj[l](src))
             masks.append(mask)
@@ -598,13 +593,10 @@ class GroundingDINO(nn.Module):
             poss.append(pos_l)
 
         input_query_bbox = input_query_label = attn_mask = dn_meta = None
-        print("passing info through transformer")
         hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer(
            srcs, masks, input_query_bbox, poss, input_query_label, attn_mask, text_dict
         )
 
-        print("passed info through transformer")
-
         # deformable-detr-like anchor update
         outputs_coord_list = []
         for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(
@@ -681,7 +673,6 @@ class GroundingDINO(nn.Module):
         # outputs['one_hot'].shape
         # torch.Size([4, 900, 256])
 
-        print("returning out")
         return out
 
     @torch.jit.unused
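
The commit simply deletes the ad-hoc debug prints from GroundingDINO.forward. As a minimal alternative sketch (not part of this commit, and the logger name below is hypothetical), the same trace output could be routed through Python's standard logging module, so it stays available behind a log level instead of being removed:

# Sketch only: keep forward-pass tracing behind a debug log level
# rather than bare print() calls. Logger name is a placeholder.
import logging

logger = logging.getLogger("groundingdino")

# e.g. inside GroundingDINO.forward, in place of print("inside forward"):
logger.debug("inside forward")

# Trace output appears only when debugging is enabled, e.g.:
logging.basicConfig(level=logging.DEBUG)

This keeps production output clean (the default level, WARNING, silences the calls) without having to delete and later re-add instrumentation.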