cfli committed on
Commit 90ed540
1 Parent(s): 4c91076

Upload 3 files

config.json CHANGED
@@ -6,9 +6,9 @@
   "attention_bias": false,
   "attention_dropout": 0.0,
   "auto_map": {
-    "AutoConfig": "cfli/MiniCPM-2B-reranker--configuration_minicpm_reranker.MiniCPMConfig",
-    "LayerWiseMiniCPMModel": "cfli/MiniCPM-2B-reranker--modeling_minicpm.MiniCPMModel",
-    "LayerWiseMiniCPMForCausalLM": "cfli/MiniCPM-2B-reranker--modeling_minicpm.MiniCPMForCausalLM"
+    "AutoConfig": "cfli/MiniCPM-2B-reranker--configuration_minicpm_reranker.LayerWiseMiniCPMConfig",
+    "LayerWiseMiniCPMModel": "cfli/MiniCPM-2B-reranker--modeling_minicpm.LayerWiseMiniCPMModel",
+    "LayerWiseMiniCPMForCausalLM": "cfli/MiniCPM-2B-reranker--modeling_minicpm.LayerWiseMiniCPMForCausalLM"
   },
   "bos_token_id": 1,
   "dim_model_base": 256,
configuration_minicpm_reranker.py CHANGED
@@ -28,7 +28,7 @@ logger = logging.get_logger(__name__)
 MINICPM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
 
 
-class MiniCPMConfig(PretrainedConfig):
+class LayerWiseMiniCPMConfig(PretrainedConfig):
     r"""
     This is the configuration class to store the configuration of a [`MiniCPMModel`]. It is used to instantiate an MiniCPM
     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
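
The class rename here is what the updated AutoConfig entry in config.json refers to. The model-class entries in that auto_map use the non-standard keys LayerWiseMiniCPMModel and LayerWiseMiniCPMForCausalLM rather than the usual Auto class names, so one way to reach those classes is transformers' dynamic-module helper. The sketch below assumes that get_class_from_dynamic_module in your installed version accepts a "repo--module.Class" reference plus the repo id; verify against your transformers version:

# Sketch (assumption-laden): resolve a remote-code class from the reference
# string format used in auto_map, "<repo>--<module>.<ClassName>".
from transformers.dynamic_module_utils import get_class_from_dynamic_module

reranker_cls = get_class_from_dynamic_module(
    "cfli/MiniCPM-2B-reranker--modeling_minicpm.LayerWiseMiniCPMForCausalLM",
    "cfli/MiniCPM-2B-reranker",
)
# reranker_cls.from_pretrained(...) can then be used like any PreTrainedModel subclass.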
modeling_minicpm.py CHANGED
@@ -1491,126 +1491,4 @@ class LayerWiseMiniCPMForCausalLM(MiniCPMPreTrainedModel):
         if len(matches) > 0:
             response = matches[0]
         history.append({"role": "assistant", "content": response})
-        return response, history
-
-
-@add_start_docstrings(
-    """
-    The MiniCPM Model transformer with a sequence classification head on top (linear layer).
-
-    [`MiniCPMForSequenceClassification`] uses the last token in order to do the classification, as other causal models
-    (e.g. GPT-2) do.
-
-    Since it does classification on the last token, it requires to know the position of the last token. If a
-    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
-    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
-    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
-    each row of the batch).
-    """,
-    MINICPM_START_DOCSTRING,
-)
-class MiniCPMForSequenceClassification(MiniCPMPreTrainedModel):
-    def __init__(self, config):
-        super().__init__(config)
-        self.num_labels = config.num_labels
-        self.model = MiniCPMModel(config)
-        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
-
-        # Initialize weights and apply final processing
-        self.post_init()
-
-    def get_input_embeddings(self):
-        return self.model.embed_tokens
-
-    def set_input_embeddings(self, value):
-        self.model.embed_tokens = value
-
-    @add_start_docstrings_to_model_forward(MINICPM_INPUTS_DOCSTRING)
-    def forward(
-        self,
-        input_ids: torch.LongTensor = None,
-        attention_mask: Optional[torch.Tensor] = None,
-        position_ids: Optional[torch.LongTensor] = None,
-        past_key_values: Optional[List[torch.FloatTensor]] = None,
-        inputs_embeds: Optional[torch.FloatTensor] = None,
-        labels: Optional[torch.LongTensor] = None,
-        use_cache: Optional[bool] = None,
-        output_attentions: Optional[bool] = None,
-        output_hidden_states: Optional[bool] = None,
-        return_dict: Optional[bool] = None,
-    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
-        r"""
-        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
-            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
-            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
-            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
-        """
-        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
-        transformer_outputs = self.model(
-            input_ids,
-            attention_mask=attention_mask,
-            position_ids=position_ids,
-            past_key_values=past_key_values,
-            inputs_embeds=inputs_embeds,
-            use_cache=use_cache,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
-            return_dict=return_dict,
-        )
-        hidden_states = transformer_outputs[0]
-        logits = self.score(hidden_states)
-
-        if input_ids is not None:
-            batch_size = input_ids.shape[0]
-        else:
-            batch_size = inputs_embeds.shape[0]
-
-        if self.config.pad_token_id is None and batch_size != 1:
-            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
-        if self.config.pad_token_id is None:
-            sequence_lengths = -1
-        else:
-            if input_ids is not None:
-                sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to(
-                    logits.device
-                )
-            else:
-                sequence_lengths = -1
-
-        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
-
-        loss = None
-        if labels is not None:
-            labels = labels.to(logits.device)
-            if self.config.problem_type is None:
-                if self.num_labels == 1:
-                    self.config.problem_type = "regression"
-                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
-                    self.config.problem_type = "single_label_classification"
-                else:
-                    self.config.problem_type = "multi_label_classification"
-
-            if self.config.problem_type == "regression":
-                loss_fct = MSELoss()
-                if self.num_labels == 1:
-                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
-                else:
-                    loss = loss_fct(pooled_logits, labels)
-            elif self.config.problem_type == "single_label_classification":
-                loss_fct = CrossEntropyLoss()
-                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
-            elif self.config.problem_type == "multi_label_classification":
-                loss_fct = BCEWithLogitsLoss()
-                loss = loss_fct(pooled_logits, labels)
-        if not return_dict:
-            output = (pooled_logits,) + transformer_outputs[1:]
-            return ((loss,) + output) if loss is not None else output
-
-        return SequenceClassifierOutputWithPast(
-            loss=loss,
-            logits=pooled_logits,
-            past_key_values=transformer_outputs.past_key_values,
-            hidden_states=transformer_outputs.hidden_states,
-            attentions=transformer_outputs.attentions,
-        )
+        return response, history
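
After this hunk the file ends at the chat helper's return response, history, and MiniCPMForSequenceClassification is gone. If last-token pooling over padded batches is still needed downstream, the selection rule from the deleted forward() can be restated in user code; pool_last_token below is a hypothetical helper name, not something the repository provides:

# Sketch: the last-token pooling performed by the removed
# MiniCPMForSequenceClassification.forward(), restated as a standalone helper.
import torch

def pool_last_token(logits: torch.Tensor, input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    # Index of the first pad token minus one, i.e. the last real token of each
    # row, exactly as in the deleted code; rows with no padding resolve to -1,
    # which selects the final position.
    sequence_lengths = (torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1).to(logits.device)
    batch_size = input_ids.shape[0]
    return logits[torch.arange(batch_size, device=logits.device), sequence_lengths]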