code (string, length 81–54k) | code_codestyle (int64, 0–721) | style_context (string, length 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1)
---|---|---|---|---
"""Project Euler-style problem: count the semiprimes (products of exactly two
primes, not necessarily distinct) below a given bound."""
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number via the sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count pairs of primes p <= q with p * q < max_number using two pointers."""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 0 |
"""Tests for the MobileBERT tokenizers (Python and Rust implementations)."""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for MobileBERT, driven by TokenizerTesterMixin."""

    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self) -> None:
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control(self):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
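The WordpieceTokenizer assertions above all follow from greedy longest-match-first matching. A minimal, self-contained sketch of that algorithm (illustrative, not the transformers implementation):

def greedy_wordpiece(word: str, vocab: set, unk: str = "[UNK]") -> list:
    pieces, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece  # continuation pieces carry the "##" prefix
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # any un-matchable span makes the whole word unknown
        pieces.append(cur)
        start = end
    return pieces


vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert greedy_wordpiece("unwanted", vocab) == ["un", "##want", "##ed"]
assert greedy_wordpiece("unwantedX", vocab) == ["[UNK]"]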
| 0 | 1 |
"""Dataset wrapper for language-model distillation: stores tokenized sequences
and their lengths, cleans them up, and collates padded batches."""
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    """Wraps pre-tokenized sequences and exposes a padding collate function."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Sanity-check that token_ids and lengths stay consistent."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences that are longer than max_model_input_size into chunks."""
        max_len = self.params.max_model_input_size
        idx = self.lengths > max_len
        logger.info(f"Splitting {sum(idx)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Remove sequences that are too short to be useful."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Collate: pad token ids up to the longest sequence in the batch."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
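The collate step above is the reusable part of this class. An illustrative standalone version of the padding logic (names and shapes here are assumed, not from the original file):

import numpy as np
import torch


def pad_batch(sequences, pad_idx):
    # Pad every sequence with pad_idx up to the longest one in the batch.
    lengths = [len(s) for s in sequences]
    max_len = max(lengths)
    padded = [list(s.astype(int)) + [pad_idx] * (max_len - len(s)) for s in sequences]
    return torch.tensor(padded), torch.tensor(lengths)


tokens, lengths = pad_batch([np.array([5, 1, 6]), np.array([5, 2, 2, 2, 6])], pad_idx=0)
assert tokens.shape == (2, 5) and lengths.tolist() == [3, 5]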
| 0 |
"""Question answering Trainer with TensorRT quantization calibration and ONNX export helpers."""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    """Trainer with hooks for quantization calibration, evaluation, and prediction."""

    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )
    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
def UpperCamelCase ( self , A__="./" ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.eval_dataset
_SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ )
_SCREAMING_SNAKE_CASE = next(iter(A__ ) )
# saving device - to make it consistent
_SCREAMING_SNAKE_CASE = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
# convert to tuple
_SCREAMING_SNAKE_CASE = tuple(v.to(A__ ) for k, v in batch.items() )
logger.info("""Converting model to be onnx compatible""" )
from pytorch_quantization.nn import TensorQuantizer
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = self.model.to(A__ )
model.eval()
model.float()
_SCREAMING_SNAKE_CASE = model.module if hasattr(A__ , """module""" ) else model
quant_trainer.configure_model(A__ , self.quant_trainer_args )
_SCREAMING_SNAKE_CASE = os.path.join(A__ , """model.onnx""" )
logger.info(F"exporting model to {output_model_file}" )
_SCREAMING_SNAKE_CASE = {0: """batch_size""", 1: """seq_len"""}
torch.onnx.export(
A__ , A__ , A__ , export_params=A__ , opset_version=13 , do_constant_folding=A__ , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=A__ , )
logger.info("""onnx export finished""" )
| 0 | 1 |
"""Lazy import structure for the CANINE model (configuration, tokenizer, PyTorch modeling)."""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
"tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
"CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
"CanineForMultipleChoice",
"CanineForQuestionAnswering",
"CanineForSequenceClassification",
"CanineForTokenClassification",
"CanineLayer",
"CanineModel",
"CaninePreTrainedModel",
"load_tf_weights_in_canine",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
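The _LazyModule pattern above defers heavy imports (e.g. torch-backed modeling files) until an attribute is actually accessed. A minimal sketch of the same idea using PEP 562 module-level __getattr__; the submodule name here is hypothetical:

import importlib

_import_structure = {"heavy_submodule": ["HeavyClass"]}  # hypothetical layout


def __getattr__(name):
    # Import the owning submodule only on first access to one of its symbols.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")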
| 0 |
"""Base16 (hexadecimal) encoding and decoding, following RFC 3548."""


def base16_encode(data: bytes) -> str:
    """Encode bytes as an uppercase hexadecimal string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase Base16 string back into bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
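A round-trip check of the helpers above; the standard library produces the same encoding, which makes a convenient cross-check:

import base64

payload = b"Hello World!"
encoded = base16_encode(payload)
assert encoded == base64.b16encode(payload).decode("ascii")  # stdlib agrees
assert base16_decode(encoded) == payload  # decoding inverts encoding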
| 0 | 1 |
"""Data utilities for token classification tasks (NER): examples, features, and datasets."""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Loads a list of examples into a list of InputFeatures."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        """PyTorch dataset that caches features built from a TokenClassificationTask."""

        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        """TensorFlow dataset built from a TokenClassificationTask via a generator."""

        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
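The core labeling rule in convert_examples_to_features is that the first wordpiece of each word carries the real label id while every continuation piece gets the ignored padding id (-100). A toy, tokenizer-free sketch of just that alignment step:

def align_labels(word_pieces, labels, ignore_id=-100):
    # word_pieces: one list of subword strings per original word.
    label_ids = []
    for pieces, label in zip(word_pieces, labels):
        if pieces:  # some tokenizers return [] for bare whitespace
            label_ids.extend([label] + [ignore_id] * (len(pieces) - 1))
    return label_ids


assert align_labels([["un", "##want", "##ed"], ["runn", "##ing"]], [3, 7]) == [3, -100, -100, 7, -100]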
| 0 |
"""Integration tests for the offline-simulation helpers."""
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
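A minimal sketch of how such an offline simulation can be built: patch the transport layer so any HTTP call raises immediately. The real `offline` helper in the datasets test utilities is more elaborate; this is only an illustration:

from contextlib import contextmanager
from unittest.mock import patch

import requests


@contextmanager
def fake_offline():
    # Every request routed through requests.Session.request now fails fast.
    def raise_connection_error(*args, **kwargs):
        raise requests.exceptions.ConnectionError("offline simulation")

    with patch("requests.Session.request", raise_connection_error):
        yield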
| 0 | 1 |
"""Convert a Dance Diffusion checkpoint into the diffusers DanceDiffusionPipeline format."""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 48_000,
"sample_size": 65_536,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 48_000,
"sample_size": 65_536,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 48_000,
"sample_size": 131_072,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 16_000,
"sample_size": 65_536,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 16_000,
"sample_size": 65_536,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 16_000,
"sample_size": 65_536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean signal and the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
UP_NUM_TO_LAYER = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
MID_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
RES_CONV_MAP = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
ATTN_MAP = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
"""simple docstring"""
if name.startswith("""skip""" ):
return name.replace("""skip""" , RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(F"ResConvBlock error with {name}" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    """Map a key from the original state dict onto the diffusers naming scheme."""
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    diffusion_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = diffusion_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(diffusion_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
main(args)
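transform_conv_attns above splits a fused qkv projection into three equal slices along the output dimension. That slicing logic in isolation (sizes here are arbitrary):

import torch

fused = torch.randn(3 * 8, 8, 1)  # (3 * dim, dim, kernel=1) conv weight
single = fused.shape[0] // 3
q, k, v = (fused[i * single : (i + 1) * single, :, 0] for i in range(3))
assert q.shape == k.shape == v.shape == (8, 8)  # each slice becomes a Linear weight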
| 0 |
"""Sum all primes below a bound, via a 6k +/- 1 primality test feeding an
unbounded prime generator."""
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Check primality in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield primes in increasing order, without an upper bound."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Returns the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
| 0 | 1 |
"""Fast (Rust-backed) tokenizer for HerBERT."""
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """BPE tokenizer for HerBERT, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
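What the three sequence-pair helpers above produce for toy inputs, mirrored in pure Python (the cls/sep ids 0 and 1 are made up for readability):

cls, sep = [0], [1]
ids_a, ids_b = [11, 12], [21, 22, 23]
pair = cls + ids_a + sep + ids_b + sep  # build_inputs_with_special_tokens
special_mask = [1] + [0] * len(ids_a) + [1] + [0] * len(ids_b) + [1]  # get_special_tokens_mask
type_ids = len(cls + ids_a + sep) * [0] + len(ids_b + sep) * [1]  # create_token_type_ids_from_sequences
assert len(pair) == len(special_mask) == len(type_ids) == 8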
| 0 |
"""Tests for framework-agnostic tensor utilities (transpose, reshape, squeeze, expand_dims)."""
import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)


if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 0 | 1 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def lowerCAmelCase_ ( ) -> List[Any]:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def lowerCAmelCase_ ( ) -> int:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def lowerCAmelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head("""https://huggingface.co""" )
| 0 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ''
SCREAMING_SNAKE_CASE = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , A__ = None , A__ = None , **A__ , ) -> Optional[int]:
super().__init__(self , **A__ )
_SCREAMING_SNAKE_CASE = repo_info
_SCREAMING_SNAKE_CASE = token
_SCREAMING_SNAKE_CASE = None
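    # Lazily populate a cache mapping every file (and each of its parent directories) in the repo to an fsspec-style info dict.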
def UpperCamelCase ( self ) -> Tuple:
if self.dir_cache is None:
_SCREAMING_SNAKE_CASE = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_SCREAMING_SNAKE_CASE = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(A__ ): {"""name""": str(A__ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def UpperCamelCase ( self , A__ , A__ = "rb" , **A__ , ) -> Optional[int]:
        if not isinstance(self.repo_info , DatasetInfo ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
_SCREAMING_SNAKE_CASE = hf_hub_url(self.repo_info.id , A__ , revision=self.repo_info.sha )
return fsspec.open(
A__ , mode=A__ , headers=get_authentication_headers_for_url(A__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def UpperCamelCase ( self , A__ , **A__ ) -> str:
self._get_dirs()
_SCREAMING_SNAKE_CASE = self._strip_protocol(A__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(A__ )
def UpperCamelCase ( self , A__ , A__=False , **A__ ) -> List[Any]:
self._get_dirs()
_SCREAMING_SNAKE_CASE = PurePosixPath(path.strip("""/""" ) )
_SCREAMING_SNAKE_CASE = {}
for p, f in self.dir_cache.items():
_SCREAMING_SNAKE_CASE = PurePosixPath(p.strip("""/""" ) )
_SCREAMING_SNAKE_CASE = p.parent
if root == path:
_SCREAMING_SNAKE_CASE = f
_SCREAMING_SNAKE_CASE = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 0 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
_SCREAMING_SNAKE_CASE = (
Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , split=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
    if issubclass(path_type , str ):
        _SCREAMING_SNAKE_CASE = parquet_path
    elif issubclass(path_type , list ):
        _SCREAMING_SNAKE_CASE = [parquet_path]
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=("train",) ) -> List[str]:
"""simple docstring"""
    assert isinstance(dataset_dict , DatasetDict )
for split in splits:
_SCREAMING_SNAKE_CASE = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
_SCREAMING_SNAKE_CASE = (
Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_SCREAMING_SNAKE_CASE = ParquetDatasetReader({"""train""": parquet_path} , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
if split:
_SCREAMING_SNAKE_CASE = {split: parquet_path}
else:
_SCREAMING_SNAKE_CASE = """train"""
_SCREAMING_SNAKE_CASE = {"""train""": parquet_path, """test""": parquet_path}
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_SCREAMING_SNAKE_CASE = pq.ParquetFile(tmp_path / """foo.parquet""" )
_SCREAMING_SNAKE_CASE = pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = str(shared_datadir / """test_image_rgb.jpg""" )
_SCREAMING_SNAKE_CASE = {"""image""": [image_path]}
_SCREAMING_SNAKE_CASE = Features({"""image""": Image()} )
_SCREAMING_SNAKE_CASE = Dataset.from_dict(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_SCREAMING_SNAKE_CASE = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
    _SCREAMING_SNAKE_CASE = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=True ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
assert get_writer_batch_size(SCREAMING_SNAKE_CASE_ ) == expected
| 0 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> list[list]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = current_set.copy()
for row_index, row in enumerate(SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = row[0]
for column_index, column in enumerate(SCREAMING_SNAKE_CASE_ ):
if magnitude == 0:
_SCREAMING_SNAKE_CASE = column
continue
_SCREAMING_SNAKE_CASE = column / magnitude
# Subtract to cancel term
_SCREAMING_SNAKE_CASE = current_set[0]
_SCREAMING_SNAKE_CASE = [first_row]
_SCREAMING_SNAKE_CASE = current_set[1::]
for row in current_set:
_SCREAMING_SNAKE_CASE = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(SCREAMING_SNAKE_CASE_ )
continue
for column_index in range(len(SCREAMING_SNAKE_CASE_ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(SCREAMING_SNAKE_CASE_ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_SCREAMING_SNAKE_CASE = final_set[0]
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_SCREAMING_SNAKE_CASE = simplify(SCREAMING_SNAKE_CASE_ )
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = resultant
return final_set
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> list:
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE_ ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
_SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ ) + 1
if any(len(SCREAMING_SNAKE_CASE_ ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(SCREAMING_SNAKE_CASE_ , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(SCREAMING_SNAKE_CASE_ ) == 1:
return [equations[0][-1] / equations[0][0]]
_SCREAMING_SNAKE_CASE = equations.copy()
if any(0 in row for row in data_set ):
_SCREAMING_SNAKE_CASE = data_set.copy()
_SCREAMING_SNAKE_CASE = []
for row_index, row in enumerate(SCREAMING_SNAKE_CASE_ ):
if 0 not in row:
_SCREAMING_SNAKE_CASE = data_set.pop(SCREAMING_SNAKE_CASE_ )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = data_set.copy()
_SCREAMING_SNAKE_CASE = simplify(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = simplified[::-1]
_SCREAMING_SNAKE_CASE = []
for row in simplified:
_SCREAMING_SNAKE_CASE = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_SCREAMING_SNAKE_CASE = row.copy()[: len(SCREAMING_SNAKE_CASE_ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
solutions.append(0 )
continue
_SCREAMING_SNAKE_CASE = temp_row[1::]
_SCREAMING_SNAKE_CASE = temp_row[::-1]
for column_index, column in enumerate(SCREAMING_SNAKE_CASE_ ):
current_solution -= column * solutions[column_index]
solutions.append(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = []
for item in solutions:
final.append(float(round(SCREAMING_SNAKE_CASE_ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ : Union[str, Any] = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
    print(solve_simultaneous(UpperCamelCase__))
print(solve_simultaneous([[4, 2]]))
| 0 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError("""multiplicative_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""multiplicative_persistence() does not accept negative values""" )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
while len(SCREAMING_SNAKE_CASE_ ) != 1:
        _SCREAMING_SNAKE_CASE = [int(i ) for i in num_string]
_SCREAMING_SNAKE_CASE = 1
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
total *= numbers[i]
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
steps += 1
return steps
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError("""additive_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""additive_persistence() does not accept negative values""" )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
while len(SCREAMING_SNAKE_CASE_ ) != 1:
        _SCREAMING_SNAKE_CASE = [int(i ) for i in num_string]
_SCREAMING_SNAKE_CASE = 0
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
total += numbers[i]
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 1 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="attention" ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"]
_SCREAMING_SNAKE_CASE = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"]
_SCREAMING_SNAKE_CASE = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"]
_SCREAMING_SNAKE_CASE = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"]
return k, o, q, v
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Dict:
"""simple docstring"""
if split_mlp_wi:
_SCREAMING_SNAKE_CASE = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"]
_SCREAMING_SNAKE_CASE = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"]
_SCREAMING_SNAKE_CASE = (wi_a, wi_a)
else:
_SCREAMING_SNAKE_CASE = params[F"{prefix}/layers_{i}/mlp/wi/kernel"]
_SCREAMING_SNAKE_CASE = params[F"{prefix}/layers_{i}/mlp/wo/kernel"]
return wi, wo
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
return params[F"{prefix}/layers_{i}/{layer_name}/scale"]
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , *, SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = traverse_util.flatten_dict(variables["""target"""] )
_SCREAMING_SNAKE_CASE = {"""/""".join(SCREAMING_SNAKE_CASE_ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_SCREAMING_SNAKE_CASE = """encoder/layers_0/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = collections.OrderedDict()
# Shared embeddings.
_SCREAMING_SNAKE_CASE = old["""token_embedder/embedding"""]
# Encoder.
for i in range(SCREAMING_SNAKE_CASE_ ):
# Block i, layer 0 (Self Attention).
_SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """encoder""" , """pre_attention_layer_norm""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """encoder""" , """attention""" )
_SCREAMING_SNAKE_CASE = layer_norm
_SCREAMING_SNAKE_CASE = k.T
_SCREAMING_SNAKE_CASE = o.T
_SCREAMING_SNAKE_CASE = q.T
_SCREAMING_SNAKE_CASE = v.T
# Block i, layer 1 (MLP).
_SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """encoder""" , """pre_mlp_layer_norm""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_mlp_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """encoder""" , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = layer_norm
if split_mlp_wi:
_SCREAMING_SNAKE_CASE = wi[0].T
_SCREAMING_SNAKE_CASE = wi[1].T
else:
_SCREAMING_SNAKE_CASE = wi.T
_SCREAMING_SNAKE_CASE = wo.T
_SCREAMING_SNAKE_CASE = old[
"""encoder/relpos_bias/rel_embedding"""
].T
_SCREAMING_SNAKE_CASE = old["""encoder/encoder_norm/scale"""]
if not is_encoder_only:
# Decoder.
for i in range(SCREAMING_SNAKE_CASE_ ):
# Block i, layer 0 (Self Attention).
_SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """decoder""" , """pre_self_attention_layer_norm""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """decoder""" , """self_attention""" )
_SCREAMING_SNAKE_CASE = layer_norm
_SCREAMING_SNAKE_CASE = k.T
_SCREAMING_SNAKE_CASE = o.T
_SCREAMING_SNAKE_CASE = q.T
_SCREAMING_SNAKE_CASE = v.T
# Block i, layer 1 (Cross Attention).
_SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """decoder""" , """pre_cross_attention_layer_norm""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """decoder""" , """encoder_decoder_attention""" )
_SCREAMING_SNAKE_CASE = layer_norm
_SCREAMING_SNAKE_CASE = k.T
_SCREAMING_SNAKE_CASE = o.T
_SCREAMING_SNAKE_CASE = q.T
_SCREAMING_SNAKE_CASE = v.T
# Block i, layer 2 (MLP).
_SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """decoder""" , """pre_mlp_layer_norm""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_mlp_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """decoder""" , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = layer_norm
if split_mlp_wi:
_SCREAMING_SNAKE_CASE = wi[0].T
_SCREAMING_SNAKE_CASE = wi[1].T
else:
_SCREAMING_SNAKE_CASE = wi.T
_SCREAMING_SNAKE_CASE = wo.T
_SCREAMING_SNAKE_CASE = old["""decoder/decoder_norm/scale"""]
_SCREAMING_SNAKE_CASE = old[
"""decoder/relpos_bias/rel_embedding"""
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_SCREAMING_SNAKE_CASE = old["""decoder/logits_dense/kernel"""].T
return new
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_SCREAMING_SNAKE_CASE = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_SCREAMING_SNAKE_CASE = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
_SCREAMING_SNAKE_CASE = state_dict["""shared.weight"""]
return state_dict
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = convert_tax_to_pytorch(SCREAMING_SNAKE_CASE_ , num_layers=config.num_layers , is_encoder_only=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = make_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = TaConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
print(F"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_SCREAMING_SNAKE_CASE = TaEncoderModel(SCREAMING_SNAKE_CASE_ )
else:
_SCREAMING_SNAKE_CASE = TaForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
    # Load weights from the T5X checkpoint
load_tax_weights_in_ta(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Verify that we can load the checkpoint.
model.from_pretrained(SCREAMING_SNAKE_CASE_ )
print("""Done""" )
if __name__ == "__main__":
UpperCamelCase__ : Optional[Any] = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
UpperCamelCase__ : int = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 0 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
UpperCamelCase__ : Tuple = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase__ : Union[str, Any] = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase__ : str = "sshleifer/tiny-mbart"
@require_torch
class _a (_lowerCamelCase):
"""simple docstring"""
def UpperCamelCase ( self , A__=False , A__=None , A__=True , A__=True , A__=True , A__=True , ) -> Optional[Any]:
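        # Train (and optionally eval/predict) a tiny translation model for one epoch, then sanity-check the logged eval metrics.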
_SCREAMING_SNAKE_CASE = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=A__ , num_train_epochs=1 , distributed=A__ , extra_args_str=A__ , predict_with_generate=A__ , do_train=A__ , do_eval=A__ , do_predict=A__ , )
_SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history
if not do_eval:
return
_SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()]
_SCREAMING_SNAKE_CASE = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
_SCREAMING_SNAKE_CASE = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , A__ )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def UpperCamelCase ( self ) -> Optional[int]:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def UpperCamelCase ( self ) -> Optional[Any]:
self.run_seqaseq_quick(distributed=A__ )
@require_torch_multi_gpu
def UpperCamelCase ( self ) -> Union[str, Any]:
self.run_seqaseq_quick(distributed=A__ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> Any:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> Tuple:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> str:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=A__ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> List[str]:
self.run_seqaseq_quick(
distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=A__ )
@require_apex
@require_torch_gpu
def UpperCamelCase ( self ) -> Optional[Any]:
        # XXX: apex breaks the trainer if it's run twice, e.g. run_seq2seq.main() from the same
        # program, and it breaks other tests that run from the same pytest worker. Therefore, until this
        # is sorted out, it must be run only in an external program, i.e. distributed=True in this
        # test and only under one or more gpus - if we want cpu we will need to make a special test.
        #
        # Specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via a
        # 2nd main() call, it botches the future eval.
#
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def UpperCamelCase ( self , A__ ) -> List[Any]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
_SCREAMING_SNAKE_CASE = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
_SCREAMING_SNAKE_CASE = experiments[experiment_id]
_SCREAMING_SNAKE_CASE = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
_SCREAMING_SNAKE_CASE = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**A__ , extra_args_str=data["""extra_args_str"""] )
_SCREAMING_SNAKE_CASE = len(re.findall(A__ , cl.err ) )
self.assertEqual(A__ , data["""n_matches"""] )
@slow
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=10 , distributed=A__ , )
# Check metrics
_SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history
_SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()]
_SCREAMING_SNAKE_CASE = eval_metrics[0]
_SCREAMING_SNAKE_CASE = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , A__ )
# test if do_predict saves generations and metrics
_SCREAMING_SNAKE_CASE = os.listdir(A__ )
_SCREAMING_SNAKE_CASE = {os.path.basename(A__ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def UpperCamelCase ( self ) -> Dict:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(A__ ) -> Tuple[int, float]:
_SCREAMING_SNAKE_CASE = """--skip_memory_metrics 0"""
_SCREAMING_SNAKE_CASE = self.run_trainer(
max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=1 , optim=A__ , distributed=A__ , extra_args_str=A__ , do_eval=A__ , do_predict=A__ , n_gpus_to_use=1 , )
# Check metrics
_SCREAMING_SNAKE_CASE = TrainerState.load_from_json(Path(A__ , """trainer_state.json""" ) ).log_history
_SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
_SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
_SCREAMING_SNAKE_CASE = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
_SCREAMING_SNAKE_CASE = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
_SCREAMING_SNAKE_CASE = gpu_peak_mem_orig + gpu_alloc_mem_orig
_SCREAMING_SNAKE_CASE = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
_SCREAMING_SNAKE_CASE = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding` weights
        # that don't get quantized and remain in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between gpus, let's check
        # that we have at least 120MB in savings
_SCREAMING_SNAKE_CASE = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
A__ , A__ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
A__ , A__ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
A__ , A__ , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ = 3E-3 , A__ = "adafactor" , A__ = False , A__ = None , A__ = 0 , A__ = True , A__ = True , A__ = True , A__ = True , A__ = None , ) -> Dict:
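        # Assemble the CLI arguments for run_translation.py and run it either in-process or through torch.distributed.run.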
_SCREAMING_SNAKE_CASE = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
_SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE = F"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A__ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(A__ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
_SCREAMING_SNAKE_CASE = F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A__ )}\n ".split()
_SCREAMING_SNAKE_CASE = """
--do_predict
""".split()
_SCREAMING_SNAKE_CASE = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
_SCREAMING_SNAKE_CASE = get_gpu_count()
_SCREAMING_SNAKE_CASE = get_torch_dist_unique_port()
_SCREAMING_SNAKE_CASE = F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
_SCREAMING_SNAKE_CASE = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(A__ , env=self.get_env() )
else:
_SCREAMING_SNAKE_CASE = ["""run_translation.py"""] + args
with patch.object(A__ , """argv""" , A__ ):
main()
return output_dir
| 0 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class _a (unittest.TestCase):
"""simple docstring"""
def __init__( self , A__ , A__=7 , A__=3 , A__=30 , A__=4_00 , A__=True , A__=None , A__=True , A__=[0.5, 0.5, 0.5] , A__=[0.5, 0.5, 0.5] , A__=True , A__=1 / 2_55 , A__=True , ) -> Optional[int]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_SCREAMING_SNAKE_CASE = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = min_resolution
_SCREAMING_SNAKE_CASE = max_resolution
_SCREAMING_SNAKE_CASE = do_resize
_SCREAMING_SNAKE_CASE = size
_SCREAMING_SNAKE_CASE = do_normalize
_SCREAMING_SNAKE_CASE = image_mean
_SCREAMING_SNAKE_CASE = image_std
_SCREAMING_SNAKE_CASE = do_rescale
_SCREAMING_SNAKE_CASE = rescale_factor
_SCREAMING_SNAKE_CASE = do_pad
def UpperCamelCase ( self ) -> List[str]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase ( self , A__ , A__=False ) -> List[str]:
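        # Compute the height/width the processor should produce after shortest-edge resizing
        # (per image when unbatched, or the batch-wise maxima when batched).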
if not batched:
_SCREAMING_SNAKE_CASE = image_inputs[0]
if isinstance(A__ , Image.Image ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = image.size
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
if w < h:
_SCREAMING_SNAKE_CASE = int(self.size["""shortest_edge"""] * h / w )
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
elif w > h:
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
_SCREAMING_SNAKE_CASE = int(self.size["""shortest_edge"""] * w / h )
else:
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
else:
_SCREAMING_SNAKE_CASE = []
for image in image_inputs:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_SCREAMING_SNAKE_CASE = max(A__ , key=lambda A__ : item[0] )[0]
_SCREAMING_SNAKE_CASE = max(A__ , key=lambda A__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a (_lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = DetaImageProcessor if is_vision_available() else None
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = DetaImageProcessingTester(self )
@property
def UpperCamelCase ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A__ , """image_mean""" ) )
self.assertTrue(hasattr(A__ , """image_std""" ) )
self.assertTrue(hasattr(A__ , """do_normalize""" ) )
self.assertTrue(hasattr(A__ , """do_resize""" ) )
self.assertTrue(hasattr(A__ , """do_rescale""" ) )
self.assertTrue(hasattr(A__ , """do_pad""" ) )
self.assertTrue(hasattr(A__ , """size""" ) )
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , A__ )
def UpperCamelCase ( self ) -> List[str]:
pass
def UpperCamelCase ( self ) -> List[str]:
# Initialize image_processing
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
_SCREAMING_SNAKE_CASE = image_processing(A__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self ) -> Optional[int]:
# Initialize image_processing
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(A__ , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self ) -> List[Any]:
# Initialize image_processing
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(A__ , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCamelCase ( self ) -> Tuple:
# prepare image and target
_SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
_SCREAMING_SNAKE_CASE = json.loads(f.read() )
_SCREAMING_SNAKE_CASE = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
_SCREAMING_SNAKE_CASE = DetaImageProcessor()
_SCREAMING_SNAKE_CASE = image_processing(images=A__ , annotations=A__ , return_tensors="""pt""" )
# verify pixel values
_SCREAMING_SNAKE_CASE = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , A__ )
_SCREAMING_SNAKE_CASE = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , A__ , atol=1E-4 ) )
# verify area
_SCREAMING_SNAKE_CASE = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , A__ ) )
# verify boxes
_SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , A__ )
_SCREAMING_SNAKE_CASE = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , A__ , atol=1E-3 ) )
# verify image_id
_SCREAMING_SNAKE_CASE = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , A__ ) )
# verify is_crowd
_SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , A__ ) )
# verify class_labels
_SCREAMING_SNAKE_CASE = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , A__ ) )
# verify orig_size
_SCREAMING_SNAKE_CASE = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , A__ ) )
# verify size
_SCREAMING_SNAKE_CASE = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , A__ ) )
@slow
def UpperCamelCase ( self ) -> List[str]:
# prepare image, target and masks_path
_SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
_SCREAMING_SNAKE_CASE = json.loads(f.read() )
_SCREAMING_SNAKE_CASE = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
_SCREAMING_SNAKE_CASE = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
_SCREAMING_SNAKE_CASE = DetaImageProcessor(format="""coco_panoptic""" )
_SCREAMING_SNAKE_CASE = image_processing(images=A__ , annotations=A__ , masks_path=A__ , return_tensors="""pt""" )
# verify pixel values
_SCREAMING_SNAKE_CASE = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , A__ )
_SCREAMING_SNAKE_CASE = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , A__ , atol=1E-4 ) )
# verify area
_SCREAMING_SNAKE_CASE = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , A__ ) )
# verify boxes
_SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , A__ )
_SCREAMING_SNAKE_CASE = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , A__ , atol=1E-3 ) )
# verify image_id
_SCREAMING_SNAKE_CASE = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , A__ ) )
# verify is_crowd
_SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , A__ ) )
# verify class_labels
_SCREAMING_SNAKE_CASE = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , A__ ) )
# verify masks
_SCREAMING_SNAKE_CASE = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , A__ )
# verify orig_size
_SCREAMING_SNAKE_CASE = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , A__ ) )
# verify size
_SCREAMING_SNAKE_CASE = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , A__ ) )
| 0 |
'''simple docstring'''
import sys
UpperCamelCase__ : int = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = N ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = -sys.maxsize - 1
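    # Slide a 13-digit window over the series and keep the largest digit product seen.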
for i in range(len(SCREAMING_SNAKE_CASE_ ) - 12 ):
_SCREAMING_SNAKE_CASE = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
_SCREAMING_SNAKE_CASE = product
return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 0 | 1 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
UpperCamelCase__ : List[Any] = pytest.mark.integration
UpperCamelCase__ : Optional[Any] = {"comet"}
UpperCamelCase__ : Any = importlib.util.find_spec("fairseq") is not None
UpperCamelCase__ : Optional[Any] = {"code_eval"}
UpperCamelCase__ : List[str] = os.name == "nt"
UpperCamelCase__ : Union[str, Any] = {"bertscore", "frugalscore", "perplexity"}
UpperCamelCase__ : int = importlib.util.find_spec("transformers") is not None
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
@wraps(SCREAMING_SNAKE_CASE_ )
def wrapper(self , SCREAMING_SNAKE_CASE_ ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("""\"test requires Fairseq\"""" )
else:
test_case(self , SCREAMING_SNAKE_CASE_ )
return wrapper
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
"""simple docstring"""
@wraps(SCREAMING_SNAKE_CASE_ )
def wrapper(self , SCREAMING_SNAKE_CASE_ ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("""\"test requires transformers\"""" )
else:
test_case(self , SCREAMING_SNAKE_CASE_ )
return wrapper
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
@wraps(SCREAMING_SNAKE_CASE_ )
def wrapper(self , SCREAMING_SNAKE_CASE_ ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("""\"test not supported on Windows\"""" )
else:
test_case(self , SCREAMING_SNAKE_CASE_ )
return wrapper
def lowerCAmelCase_ ( ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
@local
class _a (parameterized.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = None
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" )
def UpperCamelCase ( self , A__ ) -> Dict:
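        # Import the metric module, sanity-check that _compute takes no **kwargs, then run its doctests
        # with the intensive calls (model downloads, forward passes) patched out.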
_SCREAMING_SNAKE_CASE = """[...]"""
_SCREAMING_SNAKE_CASE = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , A__ ) ).module_path )
_SCREAMING_SNAKE_CASE = datasets.load.import_main_class(metric_module.__name__ , dataset=A__ )
# check parameters
_SCREAMING_SNAKE_CASE = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(A__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
_SCREAMING_SNAKE_CASE = doctest.testmod(A__ , verbose=A__ , raise_on_error=A__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def UpperCamelCase ( self , A__ ) -> str:
_SCREAMING_SNAKE_CASE = """[...]"""
_SCREAMING_SNAKE_CASE = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , A__ ) ).module_path )
# run doctest
with self.use_local_metrics():
_SCREAMING_SNAKE_CASE = doctest.testmod(A__ , verbose=A__ , raise_on_error=A__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def UpperCamelCase ( self , A__ , A__ ) -> Dict:
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](A__ ):
yield
else:
yield
@contextmanager
def UpperCamelCase ( self ) -> Any:
def load_local_metric(A__ , *A__ , **A__ ):
return load_metric(os.path.join("""metrics""" , A__ ) , *A__ , **A__ )
with patch("""datasets.load_metric""" ) as mock_load_metric:
_SCREAMING_SNAKE_CASE = load_local_metric
yield
@classmethod
def UpperCamelCase ( cls , A__ ) -> List[Any]:
def wrapper(A__ ):
_SCREAMING_SNAKE_CASE = contextmanager(A__ )
_SCREAMING_SNAKE_CASE = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[str]:
"""simple docstring"""
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags
class _a (_lowerCamelCase):
"""simple docstring"""
def UpperCamelCase ( self , A__ ) -> List[str]:
assert len(input_dict["""input_ids"""] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
_SCREAMING_SNAKE_CASE = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
import torch
def bert_cos_score_idf(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(SCREAMING_SNAKE_CASE_ ) )
    # mock get_model which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("""bert_score.scorer.get_model""" ), patch(
"""bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
_SCREAMING_SNAKE_CASE = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
"""simple docstring"""
def load_from_checkpoint(SCREAMING_SNAKE_CASE_ ):
class _a :
"""simple docstring"""
def UpperCamelCase ( self , A__ , *A__ , **A__ ) -> Optional[int]:
assert len(A__ ) == 2
_SCREAMING_SNAKE_CASE = [0.19, 0.92]
return scores, sum(A__ ) / len(A__ )
return Model()
    # mock download_model which is supposed to download a comet model
    # mock load_from_checkpoint which is supposed to load the downloaded comet checkpoint
with patch("""comet.download_model""" ) as mock_download_model:
_SCREAMING_SNAKE_CASE = None
with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
_SCREAMING_SNAKE_CASE = load_from_checkpoint
yield
def lowerCAmelCase_ ( ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = load_metric(os.path.join("""metrics""" , """seqeval""" ) )
_SCREAMING_SNAKE_CASE = """ERROR"""
_SCREAMING_SNAKE_CASE = F"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
with pytest.raises(SCREAMING_SNAKE_CASE_ , match=re.escape(SCREAMING_SNAKE_CASE_ ) ):
metric.compute(predictions=[] , references=[] , scheme=SCREAMING_SNAKE_CASE_ )
| 0 |
'''simple docstring'''
UpperCamelCase__ : Dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
UpperCamelCase__ : str = {value: key for key, value in encode_dict.items()}
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """"""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("""encode() accepts only letters of the alphabet and spaces""" )
return encoded
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
if set(SCREAMING_SNAKE_CASE_ ) - {"A", "B", " "} != set():
raise Exception("""decode() accepts only 'A', 'B' and spaces""" )
_SCREAMING_SNAKE_CASE = """"""
for word in coded.split():
while len(SCREAMING_SNAKE_CASE_ ) != 0:
decoded += decode_dict[word[:5]]
_SCREAMING_SNAKE_CASE = word[5:]
decoded += " "
return decoded.strip()
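# A minimal, self-contained sketch of the round trip above (illustrative only:
# the two functions in this file are both obfuscated to `lowerCAmelCase_` and
# shadow each other, so the helper names below are hypothetical stand-ins that
# take the translation tables as arguments; note the table above is this
# module's own mapping, not the classic 24-letter Baconian alphabet).
def _bacon_encode(word: str, table: dict) -> str:
    return "".join(table[letter] for letter in word.lower())


def _bacon_decode(coded: str, table: dict) -> str:
    words = []
    for chunk in coded.split():
        words.append("".join(table[chunk[i : i + 5]] for i in range(0, len(chunk), 5)))
    return " ".join(words)


# Round trip, using the encode/decode tables defined above:
#   _bacon_decode(_bacon_encode("hello", encode_table), decode_table) == "hello"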
if __name__ == "__main__":
from doctest import testmod
testmod()
| 0 | 1 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Optional[Any]:
"""simple docstring"""
try:
_SCREAMING_SNAKE_CASE = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_SCREAMING_SNAKE_CASE = default
else:
# KEY is set, convert it to True or False.
try:
_SCREAMING_SNAKE_CASE = strtobool(SCREAMING_SNAKE_CASE_ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
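# Minimal sketch of the flag parsing above (a demo helper that is never called;
# `parse_flag_from_env` is the name this helper is invoked by just below):
def _parse_flag_from_env_demo() -> None:
    os.environ["RUN_SLOW"] = "yes"
    assert parse_flag_from_env("RUN_SLOW", default=False) == 1  # strtobool yields 1/0, not True/False
    del os.environ["RUN_SLOW"]
    assert parse_flag_from_env("RUN_SLOW", default=False) is False  # unset -> default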
UpperCamelCase__ : Union[str, Any] = parse_flag_from_env("RUN_SLOW", default=False)
UpperCamelCase__ : int = parse_flag_from_env("RUN_REMOTE", default=False)
UpperCamelCase__ : List[Any] = parse_flag_from_env("RUN_LOCAL", default=True)
UpperCamelCase__ : List[Any] = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
UpperCamelCase__ : List[str] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
UpperCamelCase__ : int = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
UpperCamelCase__ : List[str] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
UpperCamelCase__ : List[Any] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)
# Beam
UpperCamelCase__ : str = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
UpperCamelCase__ : Optional[int] = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("0.3.2"),
reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
UpperCamelCase__ : List[str] = pytest.mark.skipif(
sys.platform == "win32",
reason="test should not be run on Windows",
)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[str]:
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
_SCREAMING_SNAKE_CASE = unittest.skip("""test requires faiss""" )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Any:
"""simple docstring"""
try:
import regex # noqa
except ImportError:
_SCREAMING_SNAKE_CASE = unittest.skip("""test requires regex""" )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
_SCREAMING_SNAKE_CASE = unittest.skip("""test requires elasticsearch""" )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Any:
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
_SCREAMING_SNAKE_CASE = unittest.skip("""test requires sqlalchemy""" )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Any:
"""simple docstring"""
if not config.TORCH_AVAILABLE:
_SCREAMING_SNAKE_CASE = unittest.skip("""test requires PyTorch""" )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
"""simple docstring"""
if not config.TF_AVAILABLE:
_SCREAMING_SNAKE_CASE = unittest.skip("""test requires TensorFlow""" )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
if not config.JAX_AVAILABLE:
_SCREAMING_SNAKE_CASE = unittest.skip("""test requires JAX""" )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
if not config.PIL_AVAILABLE:
_SCREAMING_SNAKE_CASE = unittest.skip("""test requires Pillow""" )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
def _require_spacy_model(SCREAMING_SNAKE_CASE_ ):
try:
import spacy # noqa F401
spacy.load(SCREAMING_SNAKE_CASE_ )
except ImportError:
return unittest.skip("""test requires spacy""" )(SCREAMING_SNAKE_CASE_ )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(SCREAMING_SNAKE_CASE_ ) )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
return _require_spacy_model
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Any:
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
_SCREAMING_SNAKE_CASE = unittest.skip("""test is slow""" )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
_SCREAMING_SNAKE_CASE = unittest.skip("""test is local""" )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[str]:
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
_SCREAMING_SNAKE_CASE = unittest.skip("""test is packaged""" )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
_SCREAMING_SNAKE_CASE = unittest.skip("""test requires remote""" )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCAmelCase_ ( *SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(SCREAMING_SNAKE_CASE_ ) and name.startswith("""test""" ):
for decorator in decorators:
_SCREAMING_SNAKE_CASE = decorator(SCREAMING_SNAKE_CASE_ )
setattr(cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return cls
return decorate
class _a (_lowerCamelCase):
"""simple docstring"""
pass
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 2
@contextmanager
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_=OfflineSimulationMode.CONNECTION_FAILS , SCREAMING_SNAKE_CASE_=1e-16 ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = requests.Session().request
def timeout_request(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
# Change the url to an invalid url so that the connection hangs
_SCREAMING_SNAKE_CASE = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
F"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." )
_SCREAMING_SNAKE_CASE = timeout
try:
return online_request(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
_SCREAMING_SNAKE_CASE = url
_SCREAMING_SNAKE_CASE = e.args[0]
_SCREAMING_SNAKE_CASE = (max_retry_error.args[0].replace("""10.255.255.1""" , F"OfflineMock[{url}]" ),)
_SCREAMING_SNAKE_CASE = (max_retry_error,)
raise
def raise_connection_error(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
raise requests.ConnectionError("""Offline mode is enabled.""" , request=SCREAMING_SNAKE_CASE_ )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" , SCREAMING_SNAKE_CASE_ ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" , SCREAMING_SNAKE_CASE_ ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" , SCREAMING_SNAKE_CASE_ ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
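# Standalone sketch of the CONNECTION_FAILS branch above (illustrative only;
# uses just `unittest.mock.patch` and `requests`, both imported at the top of
# this module, and is never called by the utilities themselves):
def _offline_connection_fails_demo() -> None:
    def _raise_connection_error(self, request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=request)

    with patch("requests.Session.send", _raise_connection_error):
        try:
            requests.get("https://example.com", timeout=1.0)
        except requests.ConnectionError:
            pass  # every HTTP call now fails fast, without touching the network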
@contextmanager
def lowerCAmelCase_ ( *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = str(Path().resolve() )
with tempfile.TemporaryDirectory(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) as tmp_dir:
try:
os.chdir(SCREAMING_SNAKE_CASE_ )
yield
finally:
os.chdir(SCREAMING_SNAKE_CASE_ )
@contextmanager
def lowerCAmelCase_ ( ) -> str:
"""simple docstring"""
import gc
gc.collect()
_SCREAMING_SNAKE_CASE = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def lowerCAmelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
import gc
gc.collect()
_SCREAMING_SNAKE_CASE = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
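# Intended usage of the two memory guards above (names as in the original
# utilities, assumed here: assert_arrow_memory_increases and
# assert_arrow_memory_doesnt_increase):
#
#     with assert_arrow_memory_increases():
#         table = pa.table({"col": list(range(100_000))})  # allocates Arrow buffers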
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
"""simple docstring"""
return deepcopy(SCREAMING_SNAKE_CASE_ ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(SCREAMING_SNAKE_CASE_ ).integers(0 , 1_00 , 10 ).tolist()
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Any:
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
try:
return func(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
except HTTPError as err:
if str(SCREAMING_SNAKE_CASE_ ).startswith("""500""" ) or str(SCREAMING_SNAKE_CASE_ ).startswith("""502""" ):
pytest.xfail(str(SCREAMING_SNAKE_CASE_ ) )
raise err
return decorator.decorator(_wrapper , SCREAMING_SNAKE_CASE_ )
class _a :
"""simple docstring"""
def __init__( self , A__ , A__ , A__ ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = returncode
_SCREAMING_SNAKE_CASE = stdout
_SCREAMING_SNAKE_CASE = stderr
async def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
while True:
_SCREAMING_SNAKE_CASE = await stream.readline()
if line:
callback(SCREAMING_SNAKE_CASE_ )
else:
break
async def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print("""\nRunning: """ , """ """.join(SCREAMING_SNAKE_CASE_ ) )
_SCREAMING_SNAKE_CASE = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=SCREAMING_SNAKE_CASE_ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=SCREAMING_SNAKE_CASE_ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
def tee(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="" ):
_SCREAMING_SNAKE_CASE = line.decode("""utf-8""" ).rstrip()
sink.append(SCREAMING_SNAKE_CASE_ )
if not quiet:
print(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , file=SCREAMING_SNAKE_CASE_ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda SCREAMING_SNAKE_CASE_ : tee(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sys.stdout , label="""stdout:""" ) ),
_read_stream(p.stderr , lambda SCREAMING_SNAKE_CASE_ : tee(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sys.stderr , label="""stderr:""" ) ),
] , timeout=SCREAMING_SNAKE_CASE_ , )
return _RunOutput(await p.wait() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=1_80 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True ) -> _RunOutput:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = asyncio.get_event_loop()
_SCREAMING_SNAKE_CASE = loop.run_until_complete(
_stream_subprocess(SCREAMING_SNAKE_CASE_ , env=SCREAMING_SNAKE_CASE_ , stdin=SCREAMING_SNAKE_CASE_ , timeout=SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ , echo=SCREAMING_SNAKE_CASE_ ) )
_SCREAMING_SNAKE_CASE = """ """.join(SCREAMING_SNAKE_CASE_ )
if result.returncode > 0:
_SCREAMING_SNAKE_CASE = """\n""".join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"'{cmd_str}' produced no output." )
return result
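# Hypothetical usage of the synchronous wrapper above (assumption: it is named
# `execute_subprocess_async` as in the original utilities; note that stdout and
# stderr are returned as lists of already-decoded lines):
#
#     result = execute_subprocess_async([sys.executable, "-c", "print('ok')"], env=os.environ.copy())
#     assert result.returncode == 0
#     assert "ok" in "\n".join(result.stdout)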
def lowerCAmelCase_ ( ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
_SCREAMING_SNAKE_CASE = re.sub(r"""^gw""" , """""" , SCREAMING_SNAKE_CASE_ , 0 , re.M )
return int(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 2_95_00
_SCREAMING_SNAKE_CASE = pytest_xdist_worker_id()
return port + uniq_delta
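# Sketch of the port arithmetic above (a demo helper that is never called;
# `pytest_xdist_worker_id` is the name used at the call site in the function
# above):
def _pytest_xdist_port_demo() -> None:
    os.environ["PYTEST_XDIST_WORKER"] = "gw3"
    assert pytest_xdist_worker_id() == 3  # so the distributed-run port becomes 29500 + 3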
| 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = emb.weight.shape
_SCREAMING_SNAKE_CASE = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = emb.weight.data
return lin_layer
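# Sketch of the weight tying performed by the helper above (a demo that is
# never called; `make_linear_from_emb` is the name used at the call site below,
# and in the original script the projection is created with bias=False):
def _tied_lm_head_demo() -> None:
    emb = nn.Embedding(10, 4)
    lm_head = make_linear_from_emb(emb)
    assert torch.equal(lm_head.weight, emb.weight)  # output projection reuses the embedding matrix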
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )
_SCREAMING_SNAKE_CASE = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
_SCREAMING_SNAKE_CASE = mam_aaa["""model"""]
remove_ignore_keys_(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = state_dict["""encoder.embed_tokens.weight"""].shape[0]
_SCREAMING_SNAKE_CASE = MaMaaaConfig(
vocab_size=SCREAMING_SNAKE_CASE_ , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
_SCREAMING_SNAKE_CASE = state_dict["""decoder.embed_tokens.weight"""]
_SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
model.model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
UpperCamelCase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
UpperCamelCase__ : List[str] = parser.parse_args()
    UpperCamelCase__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 0 | 1 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
"""simple docstring"""
# Initialise PyTorch model
_SCREAMING_SNAKE_CASE = BigBirdConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
print(F"Building PyTorch model from configuration: {config}" )
if is_trivia_qa:
_SCREAMING_SNAKE_CASE = BigBirdForQuestionAnswering(SCREAMING_SNAKE_CASE_ )
else:
_SCREAMING_SNAKE_CASE = BigBirdForPreTraining(SCREAMING_SNAKE_CASE_ )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , is_trivia_qa=SCREAMING_SNAKE_CASE_ )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
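# Example invocation (hypothetical paths; the script file name is an assumption):
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bigbird/model.ckpt \
#       --big_bird_config_file ./bigbird/config.json \
#       --pytorch_dump_path ./bigbird-pytorch \
#       --is_trivia_qa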
if __name__ == "__main__":
UpperCamelCase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
UpperCamelCase__ : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase__ : str = {
"configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
"tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = [
"CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
"CanineForMultipleChoice",
"CanineForQuestionAnswering",
"CanineForSequenceClassification",
"CanineForTokenClassification",
"CanineLayer",
"CanineModel",
"CaninePreTrainedModel",
"load_tf_weights_in_canine",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
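# Minimal standalone sketch of the lazy-import pattern used above (illustrative
# only; the real implementation is `transformers.utils._LazyModule`, which also
# wires itself into sys.modules and handles submodule access):
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr])  # the heavy import happens here
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value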
| 0 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : str = logging.get_logger(__name__)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"transformer.blocks.{i}.norm1.weight", F"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"transformer.blocks.{i}.norm1.bias", F"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"transformer.blocks.{i}.attn.proj.weight", F"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"transformer.blocks.{i}.attn.proj.bias", F"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"transformer.blocks.{i}.norm2.weight", F"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"transformer.blocks.{i}.norm2.bias", F"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(F"transformer.blocks.{i}.mlp.fc1.weight", F"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"transformer.blocks.{i}.mlp.fc1.bias", F"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"transformer.blocks.{i}.mlp.fc2.weight", F"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"transformer.blocks.{i}.mlp.fc2.bias", F"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
(
"""text_embeddings.position_embeddings.weight""",
"""vilt.embeddings.text_embeddings.position_embeddings.weight""",
),
("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
(
"""text_embeddings.token_type_embeddings.weight""",
"""vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
),
("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
# patch embeddings
("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
# token type embeddings
("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
] )
# final layernorm + pooler
rename_keys.extend(
[
("""transformer.norm.weight""", """vilt.layernorm.weight"""),
("""transformer.norm.bias""", """vilt.layernorm.bias"""),
("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("""vqa_classifier.0.weight""", """classifier.0.weight"""),
("""vqa_classifier.0.bias""", """classifier.0.bias"""),
("""vqa_classifier.1.weight""", """classifier.1.weight"""),
("""vqa_classifier.1.bias""", """classifier.1.bias"""),
("""vqa_classifier.3.weight""", """classifier.3.weight"""),
("""vqa_classifier.3.bias""", """classifier.3.bias"""),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
] )
else:
pass
return rename_keys
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
_SCREAMING_SNAKE_CASE = """vilt."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_SCREAMING_SNAKE_CASE = state_dict.pop(F"transformer.blocks.{i}.attn.qkv.weight" )
_SCREAMING_SNAKE_CASE = state_dict.pop(F"transformer.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_SCREAMING_SNAKE_CASE = in_proj_weight[
: config.hidden_size, :
]
_SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size]
_SCREAMING_SNAKE_CASE = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_SCREAMING_SNAKE_CASE = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_SCREAMING_SNAKE_CASE = in_proj_weight[
-config.hidden_size :, :
]
_SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :]
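# Sketch of the slicing above (a demo helper that is never called): timm stores
# attention as one fused [3*hidden, hidden] qkv matrix, which is split here into
# equal query/key/value blocks of [hidden, hidden] each.
def _qkv_split_demo() -> None:
    hidden = 4
    fused = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    query, key, value = fused[:hidden, :], fused[hidden : 2 * hidden, :], fused[-hidden:, :]
    assert query.shape == key.shape == value.shape == (hidden, hidden)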
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = dct.pop(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = val
@torch.no_grad()
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
if "vqa" in checkpoint_url:
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = 31_29
_SCREAMING_SNAKE_CASE = """huggingface/label-files"""
_SCREAMING_SNAKE_CASE = """vqa2-id2label.json"""
_SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" ) , """r""" ) )
_SCREAMING_SNAKE_CASE = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE = idalabel
_SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE = ViltForQuestionAnswering(SCREAMING_SNAKE_CASE_ )
elif "nlvr" in checkpoint_url:
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = {0: """False""", 1: """True"""}
_SCREAMING_SNAKE_CASE = {v: k for k, v in config.idalabel.items()}
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = ViltForImagesAndTextClassification(SCREAMING_SNAKE_CASE_ )
elif "irtr" in checkpoint_url:
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = ViltForImageAndTextRetrieval(SCREAMING_SNAKE_CASE_ )
elif "mlm_itm" in checkpoint_url:
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = ViltForMaskedLM(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError("""Unknown model type""" )
# load state_dict of original model, remove and rename some keys
_SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""state_dict"""]
_SCREAMING_SNAKE_CASE = create_rename_keys(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if mlm_model or irtr_model:
_SCREAMING_SNAKE_CASE = ["""itm_score.fc.weight""", """itm_score.fc.bias"""]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Define processor
_SCREAMING_SNAKE_CASE = ViltImageProcessor(size=3_84 )
_SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("""bert-base-uncased""" )
_SCREAMING_SNAKE_CASE = ViltProcessor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Forward pass on example inputs (image + text)
if nlvr_model:
_SCREAMING_SNAKE_CASE = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=SCREAMING_SNAKE_CASE_ ).raw )
_SCREAMING_SNAKE_CASE = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=SCREAMING_SNAKE_CASE_ ).raw )
_SCREAMING_SNAKE_CASE = (
"""The left image contains twice the number of dogs as the right image, and at least two dogs in total are"""
""" standing."""
)
_SCREAMING_SNAKE_CASE = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" )
_SCREAMING_SNAKE_CASE = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" )
_SCREAMING_SNAKE_CASE = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
_SCREAMING_SNAKE_CASE = Image.open(requests.get("""http://images.cocodataset.org/val2017/000000039769.jpg""" , stream=SCREAMING_SNAKE_CASE_ ).raw )
if mlm_model:
_SCREAMING_SNAKE_CASE = """a bunch of [MASK] laying on a [MASK]."""
else:
_SCREAMING_SNAKE_CASE = """How many cats are there?"""
_SCREAMING_SNAKE_CASE = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" )
_SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
# Verify outputs
if mlm_model:
_SCREAMING_SNAKE_CASE = torch.Size([1, 11, 3_05_22] )
_SCREAMING_SNAKE_CASE = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
# verify masked token prediction equals "cats"
_SCREAMING_SNAKE_CASE = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
_SCREAMING_SNAKE_CASE = torch.Size([1, 31_29] )
_SCREAMING_SNAKE_CASE = torch.tensor([-15.9495, -18.1472, -10.3041] )
assert torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
_SCREAMING_SNAKE_CASE = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
_SCREAMING_SNAKE_CASE = torch.Size([1, 2] )
_SCREAMING_SNAKE_CASE = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
UpperCamelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCamelCase__ : str = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE = 'ChineseCLIPImageProcessor'
SCREAMING_SNAKE_CASE = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self , A__=None , A__=None , **A__ ) -> int:
_SCREAMING_SNAKE_CASE = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , A__ , )
_SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""" )
_SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(A__ , A__ )
_SCREAMING_SNAKE_CASE = self.image_processor
def __call__( self , A__=None , A__=None , A__=None , **A__ ) -> Optional[int]:
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_SCREAMING_SNAKE_CASE = self.tokenizer(A__ , return_tensors=A__ , **A__ )
if images is not None:
_SCREAMING_SNAKE_CASE = self.image_processor(A__ , return_tensors=A__ , **A__ )
if text is not None and images is not None:
_SCREAMING_SNAKE_CASE = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A__ ) , tensor_type=A__ )
def UpperCamelCase ( self , *A__ , **A__ ) -> Dict:
return self.tokenizer.batch_decode(*A__ , **A__ )
def UpperCamelCase ( self , *A__ , **A__ ) -> Optional[Any]:
return self.tokenizer.decode(*A__ , **A__ )
@property
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase ( self ) -> Optional[int]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , A__ , )
return self.image_processor_class
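# Hypothetical usage (the public class name and checkpoint id are assumptions;
# this mirrors the joint text+image call handled in __call__ above):
#
#     from PIL import Image
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     inputs = processor(text=["一只猫", "一只狗"], images=Image.open("cat.png"), return_tensors="pt")
#     # inputs now carries input_ids/attention_mask from the tokenizer plus pixel_values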
| 0 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
UpperCamelCase__ : int = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCamelCase__ : List[str] = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
UpperCamelCase__ : List[Any] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
UpperCamelCase__ : Any = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _a (datasets.Metric):
"""simple docstring"""
def UpperCamelCase ( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def UpperCamelCase ( self , A__ , A__ , A__=None ) -> List[str]:
return {
"matthews_correlation": float(matthews_corrcoef(A__ , A__ , sample_weight=A__ ) ),
}
| 0 | 1 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
_SCREAMING_SNAKE_CASE = s_dict.pop(SCREAMING_SNAKE_CASE_ )
elif "subsample" in key:
_SCREAMING_SNAKE_CASE = s_dict.pop(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = emb.weight.shape
_SCREAMING_SNAKE_CASE = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = emb.weight.data
return lin_layer
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )
_SCREAMING_SNAKE_CASE = mam_aaa["""args"""]
_SCREAMING_SNAKE_CASE = mam_aaa["""model"""]
_SCREAMING_SNAKE_CASE = state_dict["""decoder.output_projection.weight"""]
remove_ignore_keys_(SCREAMING_SNAKE_CASE_ )
rename_keys(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = state_dict["""decoder.embed_tokens.weight"""].shape[0]
_SCREAMING_SNAKE_CASE = args.share_decoder_input_output_embed
_SCREAMING_SNAKE_CASE = [int(SCREAMING_SNAKE_CASE_ ) for i in args.conv_kernel_sizes.split(""",""" )]
_SCREAMING_SNAKE_CASE = SpeechaTextConfig(
vocab_size=SCREAMING_SNAKE_CASE_ , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , num_conv_layers=len(SCREAMING_SNAKE_CASE_ ) , conv_channels=args.conv_channels , conv_kernel_sizes=SCREAMING_SNAKE_CASE_ , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=SCREAMING_SNAKE_CASE_ , num_beams=5 , max_length=2_00 , use_cache=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=2 , early_stopping=SCREAMING_SNAKE_CASE_ , )
_SCREAMING_SNAKE_CASE = SpeechaTextForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model.model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0 and not set(SCREAMING_SNAKE_CASE_ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
F" but all the following weights are missing {missing}" )
if tie_embeds:
_SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
_SCREAMING_SNAKE_CASE = lm_head_weights
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
UpperCamelCase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
UpperCamelCase__ : Dict = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 0 |
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
"""simple docstring"""
print(F"Vertex\tShortest Distance from vertex {src}" )
for i, d in enumerate(SCREAMING_SNAKE_CASE_ ):
print(F"{i}\t\t{d}" )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
for j in range(SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
return True
return False
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> list[float]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [float("""inf""" )] * vertex_count
_SCREAMING_SNAKE_CASE = 0.0
for _ in range(vertex_count - 1 ):
for j in range(SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
_SCREAMING_SNAKE_CASE = distance[u] + w
_SCREAMING_SNAKE_CASE = check_negative_cycle(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if negative_cycle_exists:
raise Exception("""Negative cycle found""" )
return distance
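# A worked example of the relaxation above (a demo helper that is never called;
# `bellman_ford` is the name used at the call site below):
def _bellman_ford_demo() -> None:
    edges = [
        {"src": 0, "dst": 1, "weight": 4},
        {"src": 0, "dst": 2, "weight": 5},
        {"src": 1, "dst": 2, "weight": -2},  # negative edge, but no negative cycle
    ]
    # shortest distances from vertex 0 are [0.0, 4.0, 2.0]: the path 0 -> 1 -> 2 beats 0 -> 2
    assert bellman_ford(edges, 3, 3, 0) == [0.0, 4.0, 2.0]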
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ : int = int(input("Enter number of vertices: ").strip())
UpperCamelCase__ : int = int(input("Enter number of edges: ").strip())
UpperCamelCase__ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("Edge ", i + 1)
    UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = (
int(x)
for x in input("Enter source, destination, weight: ").strip().split(" ")
)
UpperCamelCase__ : Optional[Any] = {"src": src, "dst": dest, "weight": weight}
UpperCamelCase__ : Optional[Any] = int(input("\nEnter shortest path source:").strip())
UpperCamelCase__ : Any = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 0 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase__ : Optional[int] = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Dict = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[Any] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
UpperCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class _a :
"""simple docstring"""
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=5_12 , A__=16 , A__=2 , A__=0.02 , A__=3 , A__=4 , A__=None , ) -> int:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = 13
_SCREAMING_SNAKE_CASE = 7
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = 99
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = 37
_SCREAMING_SNAKE_CASE = """gelu"""
_SCREAMING_SNAKE_CASE = 0.1
_SCREAMING_SNAKE_CASE = 0.1
_SCREAMING_SNAKE_CASE = 5_12
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 0.02
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = TFRoFormerModel(config=A__ )
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE = model(A__ )
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> str:
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = TFRoFormerForCausalLM(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )["""logits"""]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Dict:
_SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFRoFormerForSequenceClassification(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Any:
_SCREAMING_SNAKE_CASE = self.num_choices
_SCREAMING_SNAKE_CASE = TFRoFormerForMultipleChoice(config=A__ )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFRoFormerForTokenClassification(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Tuple:
_SCREAMING_SNAKE_CASE = TFRoFormerForQuestionAnswering(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        (
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
        ) = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class _a (_lowerCamelCase , _lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ ) -> str:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = TFRoFormerModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ , hidden_size=37 )
def UpperCamelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A__ )
def UpperCamelCase ( self ) -> int:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A__ )
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A__ )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A__ )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A__ )
@slow
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" )
self.assertIsNotNone(A__ )
@require_tf
class _a (unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE = model(A__ )[0]
# TODO Replace vocab size
_SCREAMING_SNAKE_CASE = 5_00_00
_SCREAMING_SNAKE_CASE = [1, 6, vocab_size]
self.assertEqual(output.shape , A__ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
_SCREAMING_SNAKE_CASE = tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A__ , atol=1E-4 )
@require_tf
class _a (unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 1E-4
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = tf.constant([[4, 10]] )
_SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
_SCREAMING_SNAKE_CASE = emba(input_ids.shape )
_SCREAMING_SNAKE_CASE = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(A__ , A__ , atol=self.tolerance )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
_SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 )
emba([2, 16, 5_12] )
_SCREAMING_SNAKE_CASE = emba.weight[:3, :5]
tf.debugging.assert_near(A__ , A__ , atol=self.tolerance )
@require_tf
class _a (unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 1E-4
def UpperCamelCase ( self ) -> int:
        # query/key/value tensors of shape (batch=2, heads=12, seq_len=16, head_dim=64)
_SCREAMING_SNAKE_CASE = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
_SCREAMING_SNAKE_CASE = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
_SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
_SCREAMING_SNAKE_CASE = embed_positions([2, 16, 7_68] )[None, None, :, :]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A__ , A__ , A__ )
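        # Hedged summary: rotary position embeddings rotate each (even, odd) feature
        # pair by a position-dependent angle, i.e. x' = x * cos(theta) +
        # rotate_every_two(x) * sin(theta); the constants below are the expected
        # rotated query/key slices.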
_SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
_SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A__ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A__ , atol=self.tolerance )
| 0 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : List[str] = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'gptj'
SCREAMING_SNAKE_CASE = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , A__=5_04_00 , A__=20_48 , A__=40_96 , A__=28 , A__=16 , A__=64 , A__=None , A__="gelu_new" , A__=0.0 , A__=0.0 , A__=0.0 , A__=1E-5 , A__=0.02 , A__=True , A__=5_02_56 , A__=5_02_56 , A__=False , **A__ , ) -> Tuple:
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = n_positions
_SCREAMING_SNAKE_CASE = n_embd
_SCREAMING_SNAKE_CASE = n_layer
_SCREAMING_SNAKE_CASE = n_head
_SCREAMING_SNAKE_CASE = n_inner
_SCREAMING_SNAKE_CASE = rotary_dim
_SCREAMING_SNAKE_CASE = activation_function
_SCREAMING_SNAKE_CASE = resid_pdrop
_SCREAMING_SNAKE_CASE = embd_pdrop
_SCREAMING_SNAKE_CASE = attn_pdrop
_SCREAMING_SNAKE_CASE = layer_norm_epsilon
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = use_cache
_SCREAMING_SNAKE_CASE = bos_token_id
_SCREAMING_SNAKE_CASE = eos_token_id
super().__init__(
bos_token_id=A__ , eos_token_id=A__ , tie_word_embeddings=A__ , **A__ )
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , A__ , A__ = "default" , A__ = None , A__ = False , ) -> int:
super().__init__(A__ , task=A__ , patching_specs=A__ , use_past=A__ )
if not getattr(self._config , """pad_token_id""" , A__ ):
# TODO: how to do that better?
_SCREAMING_SNAKE_CASE = 0
@property
def UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
_SCREAMING_SNAKE_CASE = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(A__ , direction="""inputs""" )
_SCREAMING_SNAKE_CASE = {0: """batch""", 1: """past_sequence + sequence"""}
else:
_SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCamelCase ( self ) -> int:
return self._config.n_layer
@property
def UpperCamelCase ( self ) -> int:
return self._config.n_head
def UpperCamelCase ( self , A__ , A__ = -1 , A__ = -1 , A__ = False , A__ = None , ) -> Mapping[str, Any]:
_SCREAMING_SNAKE_CASE = super(A__ , self ).generate_dummy_inputs(
A__ , batch_size=A__ , seq_length=A__ , is_pair=A__ , framework=A__ )
        # We need to order the inputs in the way they appear in forward()
_SCREAMING_SNAKE_CASE = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
_SCREAMING_SNAKE_CASE = seqlen + 2
_SCREAMING_SNAKE_CASE = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_SCREAMING_SNAKE_CASE = [
(torch.zeros(A__ ), torch.zeros(A__ )) for _ in range(self.num_layers )
]
_SCREAMING_SNAKE_CASE = common_inputs["""attention_mask"""]
if self.use_past:
_SCREAMING_SNAKE_CASE = ordered_inputs["""attention_mask"""].dtype
_SCREAMING_SNAKE_CASE = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(A__ , A__ , dtype=A__ )] , dim=1 )
return ordered_inputs
@property
def UpperCamelCase ( self ) -> int:
return 13
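# Hedged usage sketch (readable names assumed: the two classes above correspond to
# GPTJConfig and GPTJOnnxConfig in the un-obfuscated original):
#   config = GPTJConfig()
#   onnx_config = GPTJOnnxConfig(config, task="default", use_past=True)
#   onnx_config.inputs  # OrderedDict mapping input_ids / attention_mask to dynamic axes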
| 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
UpperCamelCase__ : int = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 | 1 |
'''simple docstring'''
import sys
UpperCamelCase__ : int = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = N ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = -sys.maxsize - 1
for i in range(len(SCREAMING_SNAKE_CASE_ ) - 12 ):
_SCREAMING_SNAKE_CASE = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
_SCREAMING_SNAKE_CASE = product
return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
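    # Hedged note: any 13-digit window containing a '0' has product 0, so the maximum
    # comes from a zero-free run of digits; for this input the commonly reported
    # answer is the window "5576689664895" with product 23_514_624_000.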
| 0 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = XCLIPTextConfig()
# derive patch size from model name
_SCREAMING_SNAKE_CASE = model_name.find("""patch""" )
_SCREAMING_SNAKE_CASE = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
_SCREAMING_SNAKE_CASE = XCLIPVisionConfig(patch_size=SCREAMING_SNAKE_CASE_ , num_frames=SCREAMING_SNAKE_CASE_ )
if "large" in model_name:
_SCREAMING_SNAKE_CASE = 7_68
_SCREAMING_SNAKE_CASE = 30_72
_SCREAMING_SNAKE_CASE = 12
_SCREAMING_SNAKE_CASE = 10_24
_SCREAMING_SNAKE_CASE = 40_96
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 24
_SCREAMING_SNAKE_CASE = 7_68
_SCREAMING_SNAKE_CASE = 30_72
if model_name == "xclip-large-patch14-16-frames":
_SCREAMING_SNAKE_CASE = 3_36
_SCREAMING_SNAKE_CASE = XCLIPConfig.from_text_vision_configs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if "large" in model_name:
_SCREAMING_SNAKE_CASE = 7_68
return config
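# Illustrative behaviour (hedged): for model_name "xclip-base-patch32-16-frames" the
# parser above extracts patch_size=32 from the name; "large" checkpoints additionally
# widen the text side (768/3072) and the vision side (1024/4096, 16 heads, 24 layers)
# and use a 768-dim projection.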
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
# text encoder
if name == "token_embedding.weight":
_SCREAMING_SNAKE_CASE = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
_SCREAMING_SNAKE_CASE = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
_SCREAMING_SNAKE_CASE = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
_SCREAMING_SNAKE_CASE = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
_SCREAMING_SNAKE_CASE = name.replace("""c_proj""" , """fc2""" )
if name.startswith("""transformer.resblocks""" ):
_SCREAMING_SNAKE_CASE = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
_SCREAMING_SNAKE_CASE = name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
if "ln_final" in name:
_SCREAMING_SNAKE_CASE = name.replace("""ln_final""" , """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
_SCREAMING_SNAKE_CASE = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
if "visual.conv1" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
if "visual.proj" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.proj""" , """visual_projection.weight""" )
if "text_projection" in name:
_SCREAMING_SNAKE_CASE = name.replace("""text_projection""" , """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
_SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
_SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""positional""" , """position""" )
if name.startswith("""mit.resblocks""" ):
_SCREAMING_SNAKE_CASE = name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
_SCREAMING_SNAKE_CASE = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
return name
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_SCREAMING_SNAKE_CASE = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "attn.in_proj" in key:
_SCREAMING_SNAKE_CASE = key.split(""".""" )
if key.startswith("""visual""" ):
_SCREAMING_SNAKE_CASE = key_split[3]
_SCREAMING_SNAKE_CASE = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
                        _SCREAMING_SNAKE_CASE = val[:dim, :]
                        _SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
                        _SCREAMING_SNAKE_CASE = val[-dim:, :]
                    else:
                        _SCREAMING_SNAKE_CASE = val[:dim]
                        _SCREAMING_SNAKE_CASE = val[dim : dim * 2]
                        _SCREAMING_SNAKE_CASE = val[-dim:]
else:
if "weight" in key:
                        _SCREAMING_SNAKE_CASE = val[:dim, :]
                        _SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
                        _SCREAMING_SNAKE_CASE = val[-dim:, :]
                    else:
                        _SCREAMING_SNAKE_CASE = val[:dim]
                        _SCREAMING_SNAKE_CASE = val[dim : dim * 2]
                        _SCREAMING_SNAKE_CASE = val[-dim:]
elif key.startswith("""mit""" ):
_SCREAMING_SNAKE_CASE = key_split[2]
_SCREAMING_SNAKE_CASE = config.vision_config.mit_hidden_size
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[:dim, :]
_SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
_SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
_SCREAMING_SNAKE_CASE = val[:dim]
_SCREAMING_SNAKE_CASE = val[dim : dim * 2]
_SCREAMING_SNAKE_CASE = val[-dim:]
else:
_SCREAMING_SNAKE_CASE = key_split[2]
_SCREAMING_SNAKE_CASE = config.text_config.hidden_size
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[:dim, :]
                    _SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
                    _SCREAMING_SNAKE_CASE = val[-dim:, :]
                else:
                    _SCREAMING_SNAKE_CASE = val[:dim]
                    _SCREAMING_SNAKE_CASE = val[dim : dim * 2]
_SCREAMING_SNAKE_CASE = val[-dim:]
else:
_SCREAMING_SNAKE_CASE = rename_key(SCREAMING_SNAKE_CASE_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
_SCREAMING_SNAKE_CASE = val.T
_SCREAMING_SNAKE_CASE = val
return orig_state_dict
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
if num_frames == 8:
_SCREAMING_SNAKE_CASE = """eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
_SCREAMING_SNAKE_CASE = """eating_spaghetti.npy"""
elif num_frames == 32:
_SCREAMING_SNAKE_CASE = """eating_spaghetti_32_frames.npy"""
_SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" , )
_SCREAMING_SNAKE_CASE = np.load(SCREAMING_SNAKE_CASE_ )
return list(SCREAMING_SNAKE_CASE_ )
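# Hedged usage sketch: the helper above downloads a pre-extracted clip from the Hub,
# so e.g. an 8-frame request returns a list of 8 numpy frames of shape (H, W, 3).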
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
_SCREAMING_SNAKE_CASE = model_to_url[model_name]
_SCREAMING_SNAKE_CASE = 8
if "16-frames" in model_name:
_SCREAMING_SNAKE_CASE = 16
elif "shot" in model_name:
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = get_xclip_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ )
model.eval()
if "drive" in checkpoint_url:
_SCREAMING_SNAKE_CASE = """pytorch_model.bin"""
gdown.cached_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""model"""]
else:
_SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ )["""model"""]
_SCREAMING_SNAKE_CASE = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
_SCREAMING_SNAKE_CASE = 3_36 if model_name == """xclip-large-patch14-16-frames""" else 2_24
_SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(size=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
_SCREAMING_SNAKE_CASE = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
_SCREAMING_SNAKE_CASE = XCLIPProcessor(image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = prepare_video(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE_ )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
# Verify outputs
_SCREAMING_SNAKE_CASE = outputs.logits_per_video
_SCREAMING_SNAKE_CASE = logits_per_video.softmax(dim=1 )
print("""Probs:""" , SCREAMING_SNAKE_CASE_ )
# kinetics-400
if model_name == "xclip-base-patch32":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F"Model name {model_name} not supported" )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
slow_tokenizer.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase__ : str = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 0 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> tuple:
"""simple docstring"""
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
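# Minimal usage sketch (illustrative values, using the readable parameter names):
# with inductance = 10 H and capacitance = 5 F the formula gives
# 1 / (2 * pi * sqrt(10 * 5)) ≈ 0.0225 Hz, returned as ("Resonant frequency", 0.0225...).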
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , A__ , A__ ) -> Any:
_SCREAMING_SNAKE_CASE = params
_SCREAMING_SNAKE_CASE = np.array(A__ )
_SCREAMING_SNAKE_CASE = np.array([len(A__ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , A__ ) -> Dict:
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> Tuple:
return len(self.lengths )
def UpperCamelCase ( self ) -> Dict:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.params.max_model_input_size
_SCREAMING_SNAKE_CASE = self.lengths > max_len
logger.info(F"Splitting {sum(A__ )} too long sequences." )
def divide_chunks(A__ , A__ ):
return [l[i : i + n] for i in range(0 , len(A__ ) , A__ )]
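        # Illustrative: divide_chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]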
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
if self.params.mlm:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
_SCREAMING_SNAKE_CASE = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
_SCREAMING_SNAKE_CASE = np.insert(A__ , 0 , A__ )
if sub_s[-1] != sep_id:
_SCREAMING_SNAKE_CASE = np.insert(A__ , len(A__ ) , A__ )
assert len(A__ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(A__ )
new_tok_ids.extend(A__ )
new_lengths.extend([len(A__ ) for l in sub_seqs] )
_SCREAMING_SNAKE_CASE = np.array(A__ )
_SCREAMING_SNAKE_CASE = np.array(A__ )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = len(self )
_SCREAMING_SNAKE_CASE = self.lengths > 11
_SCREAMING_SNAKE_CASE = self.token_ids[indices]
_SCREAMING_SNAKE_CASE = self.lengths[indices]
_SCREAMING_SNAKE_CASE = len(self )
logger.info(F"Remove {init_size - new_size} too short (<=11 tokens) sequences." )
def UpperCamelCase ( self ) -> int:
if "unk_token" not in self.params.special_tok_ids:
return
else:
_SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""]
_SCREAMING_SNAKE_CASE = len(self )
_SCREAMING_SNAKE_CASE = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
_SCREAMING_SNAKE_CASE = (unk_occs / self.lengths) < 0.5
_SCREAMING_SNAKE_CASE = self.token_ids[indices]
_SCREAMING_SNAKE_CASE = self.lengths[indices]
_SCREAMING_SNAKE_CASE = len(self )
logger.info(F"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." )
def UpperCamelCase ( self ) -> Optional[Any]:
if not self.params.is_master:
return
logger.info(F"{len(self )} sequences" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def UpperCamelCase ( self , A__ ) -> Any:
_SCREAMING_SNAKE_CASE = [t[0] for t in batch]
_SCREAMING_SNAKE_CASE = [t[1] for t in batch]
assert len(A__ ) == len(A__ )
# Max for paddings
_SCREAMING_SNAKE_CASE = max(A__ )
# Pad token ids
if self.params.mlm:
_SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""pad_token"""]
else:
_SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""]
_SCREAMING_SNAKE_CASE = [list(t.astype(A__ ) ) + [pad_idx] * (max_seq_len_ - len(A__ )) for t in token_ids]
assert len(tk_ ) == len(A__ )
assert all(len(A__ ) == max_seq_len_ for t in tk_ )
_SCREAMING_SNAKE_CASE = torch.tensor(tk_ ) # (bs, max_seq_len_)
_SCREAMING_SNAKE_CASE = torch.tensor(A__ ) # (bs)
return tk_t, lg_t
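    # Hedged usage sketch: the method above is the batch collate function (named
    # `batch_sequences` in the readable distillation code); it pads each
    # (token_ids, length) pair to the batch maximum, e.g.
    #   loader = DataLoader(dataset, batch_size=8, collate_fn=dataset.batch_sequences)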
| 0 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
UpperCamelCase__ : List[Any] = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : Any = "▁"
UpperCamelCase__ : Any = {"vocab_file": "spiece.model"}
UpperCamelCase__ : int = {
"vocab_file": {
"google/reformer-crime-and-punishment": (
"https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
)
}
}
UpperCamelCase__ : Optional[int] = {
"google/reformer-crime-and-punishment": 524_288,
}
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
def __init__( self , A__ , A__="</s>" , A__="<unk>" , A__=[] , A__ = None , **A__ , ) -> None:
_SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=A__ , unk_token=A__ , additional_special_tokens=A__ , sp_model_kwargs=self.sp_model_kwargs , **A__ , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A__ )
@property
def UpperCamelCase ( self ) -> Any:
return self.sp_model.get_piece_size()
def UpperCamelCase ( self ) -> Dict[str, int]:
_SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(A__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> int:
_SCREAMING_SNAKE_CASE = self.__dict__.copy()
_SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self , A__ ) -> str:
_SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase ( self , A__ ) -> List[str]:
return self.sp_model.encode(A__ , out_type=A__ )
def UpperCamelCase ( self , A__ ) -> Union[str, Any]:
return self.sp_model.piece_to_id(A__ )
def UpperCamelCase ( self , A__ ) -> List[Any]:
if index < self.sp_model.get_piece_size():
_SCREAMING_SNAKE_CASE = self.sp_model.IdToPiece(A__ )
return token
def UpperCamelCase ( self , A__ ) -> str:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A__ ) + token
_SCREAMING_SNAKE_CASE = []
else:
current_sub_tokens.append(A__ )
out_string += self.sp_model.decode(A__ )
return out_string.strip()
def UpperCamelCase ( self , A__ , A__ = None ) -> Tuple[str]:
if not os.path.isdir(A__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_SCREAMING_SNAKE_CASE = os.path.join(
A__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A__ )
elif not os.path.isfile(self.vocab_file ):
with open(A__ , """wb""" ) as fi:
_SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(A__ )
return (out_vocab_file,)
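# Hedged usage sketch (readable name ReformerTokenizer assumed for the class above):
#   tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   tokenizer.tokenize("Hello world")  # -> sentencepiece pieces such as ["▁Hello", "▁world"]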
| 0 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError("""The length of profit and weight must be same.""" )
if max_weight <= 0:
raise ValueError("""max_weight must greater than zero.""" )
if any(p < 0 for p in profit ):
raise ValueError("""Profit can not be negative.""" )
if any(w < 0 for w in weight ):
raise ValueError("""Weight can not be negative.""" )
    # Compute the profit gained per unit of weight (profit/weight) for each item.
_SCREAMING_SNAKE_CASE = [p / w for p, w in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
# Creating a copy of the list and sorting profit/weight in ascending order
_SCREAMING_SNAKE_CASE = sorted(SCREAMING_SNAKE_CASE_ )
# declaring useful variables
_SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
    # Loop until the accumulated weight reaches max_weight (e.g. 15 kg) or i reaches length.
while limit <= max_weight and i < length:
        # Pick the largest remaining profit/weight ratio from the sorted list.
_SCREAMING_SNAKE_CASE = sorted_profit_by_weight[length - i - 1]
_SCREAMING_SNAKE_CASE = profit_by_weight.index(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = -1
        # Check whether the item's full weight fits within the remaining capacity.
if max_weight - limit >= weight[index]:
limit += weight[index]
            # The whole item fits, so add its full profit
            # (fraction taken = weight[index] / weight[index] = 1).
gain += 1 * profit[index]
else:
            # Only part of the item fits: fill the remaining capacity and add the
            # proportional profit, (max_weight - limit) / weight[index] of the item.
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
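# Worked example (hedged, illustrative values, readable names assumed): with
# profit = [10, 9, 8], weight = [2, 3, 4] and max_weight = 5, the greedy takes the
# full first item (ratio 5.0) and the full second item (ratio 3.0), exactly filling
# the capacity, so calc_profit([10, 9, 8], [2, 3, 4], 5) == 19.0.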
if __name__ == "__main__":
print(
"Input profits, weights, and then max_weight (all positive ints) separated by "
"spaces."
)
UpperCamelCase__ : Optional[Any] = [int(x) for x in input("Input profits separated by spaces: ").split()]
UpperCamelCase__ : Union[str, Any] = [int(x) for x in input("Input weights separated by spaces: ").split()]
UpperCamelCase__ : List[Any] = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
| 0 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _a (_lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MobileBertTokenizer
SCREAMING_SNAKE_CASE = MobileBertTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = filter_non_english
SCREAMING_SNAKE_CASE = 'google/mobilebert-uncased'
def UpperCamelCase ( self ) -> Any:
super().setUp()
_SCREAMING_SNAKE_CASE = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def UpperCamelCase ( self , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE = """unwanted, running"""
return input_text, output_text
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(A__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [9, 6, 7, 12, 10, 11] )
def UpperCamelCase ( self ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
# With lower casing
_SCREAMING_SNAKE_CASE = self.get_tokenizer(do_lower_case=A__ )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer(do_lower_case=A__ )
_SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
_SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(A__ ):
_SCREAMING_SNAKE_CASE = i
_SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=A__ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def UpperCamelCase ( self ) -> str:
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def UpperCamelCase ( self ) -> Union[str, Any]:
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def UpperCamelCase ( self ) -> Dict:
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(A__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A__ )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A__ , A__ )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def UpperCamelCase ( self ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
_SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(
A__ , return_attention_mask=A__ , return_token_type_ids=A__ , return_offsets_mapping=A__ , add_special_tokens=A__ , )
_SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(A__ , """do_lower_case""" ) else False
_SCREAMING_SNAKE_CASE = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = ["""的""", """人""", """有"""]
_SCREAMING_SNAKE_CASE = """""".join(A__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(A__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A__ , A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(A__ )
# it is expected that only the first Chinese character is not preceded by "##".
_SCREAMING_SNAKE_CASE = [
F"##{token}" if idx != 0 else token for idx, token in enumerate(A__ )
]
self.assertListEqual(A__ , A__ )
self.assertListEqual(A__ , A__ )
| 0 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
UpperCamelCase__ : Any = TypeVar("T")
UpperCamelCase__ : Optional[int] = TypeVar("U")
class _a (Generic[T, U]):
"""simple docstring"""
def __init__( self , A__ , A__ ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = key
_SCREAMING_SNAKE_CASE = val
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
def __repr__( self ) -> str:
return (
F"Node: key: {self.key}, val: {self.val}, "
F"has next: {bool(self.next )}, has prev: {bool(self.prev )}"
)
class _a (Generic[T, U]):
"""simple docstring"""
def __init__( self ) -> None:
_SCREAMING_SNAKE_CASE = DoubleLinkedListNode(A__ , A__ )
_SCREAMING_SNAKE_CASE = DoubleLinkedListNode(A__ , A__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.rear, self.head
def __repr__( self ) -> str:
_SCREAMING_SNAKE_CASE = ["""DoubleLinkedList"""]
_SCREAMING_SNAKE_CASE = self.head
while node.next is not None:
rep.append(str(A__ ) )
_SCREAMING_SNAKE_CASE = node.next
rep.append(str(self.rear ) )
return ",\n ".join(A__ )
def UpperCamelCase ( self , A__ ) -> None:
_SCREAMING_SNAKE_CASE = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_SCREAMING_SNAKE_CASE = node
_SCREAMING_SNAKE_CASE = previous
_SCREAMING_SNAKE_CASE = node
_SCREAMING_SNAKE_CASE = self.rear
def UpperCamelCase ( self , A__ ) -> DoubleLinkedListNode[T, U] | None:
if node.prev is None or node.next is None:
return None
_SCREAMING_SNAKE_CASE = node.next
_SCREAMING_SNAKE_CASE = node.prev
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
return node
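    # Hedged design note: head and rear are sentinel nodes, so add() always splices
    # just before rear and remove() never has to special-case the ends of the list.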
class _a (Generic[T, U]):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
def __init__( self , A__ ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = DoubleLinkedList()
_SCREAMING_SNAKE_CASE = capacity
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = {}
def __repr__( self ) -> str:
return (
F"CacheInfo(hits={self.hits}, misses={self.miss}, "
F"capacity={self.capacity}, current size={self.num_keys})"
)
def __contains__( self , A__ ) -> bool:
return key in self.cache
def UpperCamelCase ( self , A__ ) -> U | None:
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
_SCREAMING_SNAKE_CASE = self.cache[key]
_SCREAMING_SNAKE_CASE = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(A__ )
return node.val
self.miss += 1
return None
def UpperCamelCase ( self , A__ , A__ ) -> None:
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_SCREAMING_SNAKE_CASE = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(A__ ) is not None
                )  # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
_SCREAMING_SNAKE_CASE = DoubleLinkedListNode(A__ , A__ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_SCREAMING_SNAKE_CASE = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_SCREAMING_SNAKE_CASE = value
self.list.add(A__ )
@classmethod
def UpperCamelCase ( cls , A__ = 1_28 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
def cache_decorator_inner(A__ ) -> Callable[..., U]:
def cache_decorator_wrapper(*A__ ) -> U:
if func not in cls.decorator_function_to_instance_map:
_SCREAMING_SNAKE_CASE = LRUCache(A__ )
_SCREAMING_SNAKE_CASE = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_SCREAMING_SNAKE_CASE = func(*A__ )
cls.decorator_function_to_instance_map[func].put(args[0] , A__ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(A__ , """cache_info""" , A__ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
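# Illustrative usage sketch of the memoisation pattern implemented above; the
# fib function and cache size are example choices, and the stdlib decorator is
# used as a stand-in because it exposes the same bounded-LRU behaviour and a
# comparable cache_info() counter.
from functools import lru_cache

@lru_cache(maxsize=128)
def _fib_sketch(n: int) -> int:
    # naive recursion; the cache turns exponential time into linear time
    return n if n < 2 else _fib_sketch(n - 1) + _fib_sketch(n - 2)

assert _fib_sketch(30) == 832040
print(_fib_sketch.cache_info())  # hits/misses counters, analogous to the CacheInfo repr above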
| 0 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
UpperCamelCase__ : Tuple = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , *A__ , A__=None , A__=None , A__=None , **A__ ) -> Optional[int]:
super().__init__(*A__ , **A__ )
_SCREAMING_SNAKE_CASE = eval_examples
_SCREAMING_SNAKE_CASE = post_process_function
_SCREAMING_SNAKE_CASE = quant_trainer_args
_SCREAMING_SNAKE_CASE = 1_28 # default number of calibration samples
def UpperCamelCase ( self , A__=None ) -> Union[str, Any]:
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("""Trainer: calibration requires an calib_dataset.""" )
_SCREAMING_SNAKE_CASE = calib_dataset if calib_dataset is not None else self.calib_dataset
_SCREAMING_SNAKE_CASE = self._remove_unused_columns(A__ , description="""Calibration""" )
return DataLoader(
A__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=A__ , )
def UpperCamelCase ( self , A__=None ) -> str:
_SCREAMING_SNAKE_CASE = self.train_dataset if calib_dataset is None else calib_dataset
_SCREAMING_SNAKE_CASE = self.get_calib_dataloader(A__ )
_SCREAMING_SNAKE_CASE = self.model
quant_trainer.configure_model(A__ , self.quant_trainer_args , calib=A__ )
model.eval()
quant_trainer.enable_calibration(A__ )
logger.info("""***** Running calibration *****""" )
logger.info(F" Num examples = {self.calib_num}" )
logger.info(F" Batch size = {calib_dataloader.batch_size}" )
for step, inputs in enumerate(A__ ):
# Prediction step
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.prediction_step(A__ , A__ , prediction_loss_only=A__ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(A__ , self.quant_trainer_args )
_SCREAMING_SNAKE_CASE = model
def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__ = "eval" ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.eval_dataset if eval_dataset is None else eval_dataset
_SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ )
_SCREAMING_SNAKE_CASE = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation; we will do it in the loop here.
_SCREAMING_SNAKE_CASE = self.compute_metrics
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_SCREAMING_SNAKE_CASE = eval_loop(
A__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , )
finally:
_SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions )
_SCREAMING_SNAKE_CASE = self.compute_metrics(A__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
_SCREAMING_SNAKE_CASE = metrics.pop(A__ )
self.log(A__ )
else:
_SCREAMING_SNAKE_CASE = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_SCREAMING_SNAKE_CASE = self.callback_handler.on_evaluate(self.args , self.state , self.control , A__ )
return metrics
def UpperCamelCase ( self , A__ , A__ , A__=None , A__ = "test" ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.get_test_dataloader(A__ )
        # Temporarily disable metric computation; we will do it in the loop here.
_SCREAMING_SNAKE_CASE = self.compute_metrics
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_SCREAMING_SNAKE_CASE = eval_loop(
A__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , )
finally:
_SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions , """predict""" )
_SCREAMING_SNAKE_CASE = self.compute_metrics(A__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
_SCREAMING_SNAKE_CASE = metrics.pop(A__ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A__ )
def UpperCamelCase ( self , A__="./" ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.eval_dataset
_SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ )
_SCREAMING_SNAKE_CASE = next(iter(A__ ) )
        # pick one device for export so all saved tensors end up consistent
_SCREAMING_SNAKE_CASE = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
# convert to tuple
_SCREAMING_SNAKE_CASE = tuple(v.to(A__ ) for k, v in batch.items() )
logger.info("""Converting model to be onnx compatible""" )
from pytorch_quantization.nn import TensorQuantizer
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = self.model.to(A__ )
model.eval()
model.float()
_SCREAMING_SNAKE_CASE = model.module if hasattr(A__ , """module""" ) else model
quant_trainer.configure_model(A__ , self.quant_trainer_args )
_SCREAMING_SNAKE_CASE = os.path.join(A__ , """model.onnx""" )
logger.info(F"exporting model to {output_model_file}" )
_SCREAMING_SNAKE_CASE = {0: """batch_size""", 1: """seq_len"""}
torch.onnx.export(
A__ , A__ , A__ , export_params=A__ , opset_version=13 , do_constant_folding=A__ , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=A__ , )
logger.info("""onnx export finished""" )
| 0 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _a (unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 1_28, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 1_42, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
_SCREAMING_SNAKE_CASE = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 1_28,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 1_42,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(A__ ) , A__ )
def UpperCamelCase ( self ) -> int:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(A__ ) , x.transpose() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(transpose(A__ ) , np.asarray(transpose(A__ ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , np.asarray(transpose(A__ , axes=(1, 2, 0) ) ) ) )
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.reshape(A__ , (4, 3) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.reshape(A__ , (12, 5) ) ) )
@require_torch
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.asarray(reshape(A__ , (4, 3) ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.asarray(reshape(A__ , (12, 5) ) ) ) )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(A__ ) , np.squeeze(A__ ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.squeeze(A__ , axis=2 ) ) )
@require_torch
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(squeeze(A__ ) , np.asarray(squeeze(A__ ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.asarray(squeeze(A__ , axis=2 ) ) ) )
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.expand_dims(A__ , axis=1 ) ) )
@require_torch
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.asarray(expand_dims(A__ , axis=1 ) ) ) )
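# Sketch of the type-dispatch idea these tests exercise; names here are
# illustrative and only the numpy/torch branches are shown, whereas the real
# helpers also cover TensorFlow and JAX through lazy framework checks.
import numpy as np

def transpose_sketch(array, axes=None):
    # route each tensor type to its framework's native transpose
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    try:
        import torch

        if isinstance(array, torch.Tensor):
            return array.T if axes is None else array.permute(*axes)
    except ImportError:
        pass
    raise TypeError(f"Type not supported for transpose: {type(array)}.")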
| 0 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
return "".join([hex(SCREAMING_SNAKE_CASE_ )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE_ )] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> bytes:
"""simple docstring"""
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(SCREAMING_SNAKE_CASE_ ) % 2) != 0:
raise ValueError(
"""Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE_ ) <= set("""0123456789ABCDEF""" ):
raise ValueError(
"""Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
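# Cross-check sketch: the helpers above mirror the standard library's Base16
# codec, so stdlib base64 can validate them; the payload is an arbitrary example.
import base64

payload = b"Hello World!"
encoded = base64.b16encode(payload).decode("ascii")
assert encoded == "48656C6C6F20576F726C6421"
assert base64.b16decode(encoded) == payload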
| 0 | 1 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False, False, False
@dataclass
class _a :
"""simple docstring"""
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = None
# Automatically constructed
SCREAMING_SNAKE_CASE = "dict"
SCREAMING_SNAKE_CASE = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
SCREAMING_SNAKE_CASE = field(default='Audio' , init=_lowerCamelCase , repr=_lowerCamelCase)
def __call__( self ) -> Dict:
return self.pa_type
def UpperCamelCase ( self , A__ ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(A__ , A__ ):
return {"bytes": None, "path": value}
elif isinstance(A__ , A__ ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
_SCREAMING_SNAKE_CASE = BytesIO()
sf.write(A__ , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
_SCREAMING_SNAKE_CASE = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67
else:
_SCREAMING_SNAKE_CASE = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 3_27_67
_SCREAMING_SNAKE_CASE = BytesIO(bytes() )
sf.write(A__ , A__ , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
F"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
def UpperCamelCase ( self , A__ , A__ = None ) -> dict:
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(F"An audio sample should have one of 'path' or 'bytes' but both are None in {value}." )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
_SCREAMING_SNAKE_CASE = xsplitext(A__ )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
_SCREAMING_SNAKE_CASE = token_per_repo_id or {}
_SCREAMING_SNAKE_CASE = path.split("""::""" )[-1]
try:
_SCREAMING_SNAKE_CASE = string_to_dict(A__ , config.HUB_DATASETS_URL )["""repo_id"""]
_SCREAMING_SNAKE_CASE = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_SCREAMING_SNAKE_CASE = None
with xopen(A__ , """rb""" , use_auth_token=A__ ) as f:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = sf.read(A__ )
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = sf.read(A__ )
_SCREAMING_SNAKE_CASE = array.T
if self.mono:
_SCREAMING_SNAKE_CASE = librosa.to_mono(A__ )
if self.sampling_rate and self.sampling_rate != sampling_rate:
_SCREAMING_SNAKE_CASE = librosa.resample(A__ , orig_sr=A__ , target_sr=self.sampling_rate )
_SCREAMING_SNAKE_CASE = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def UpperCamelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def UpperCamelCase ( self , A__ ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
_SCREAMING_SNAKE_CASE = pa.array([None] * len(A__ ) , type=pa.binary() )
_SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_SCREAMING_SNAKE_CASE = pa.array([None] * len(A__ ) , type=pa.string() )
_SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
_SCREAMING_SNAKE_CASE = pa.array([Audio().encode_example(A__ ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
_SCREAMING_SNAKE_CASE = storage.field("""bytes""" )
else:
_SCREAMING_SNAKE_CASE = pa.array([None] * len(A__ ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
_SCREAMING_SNAKE_CASE = storage.field("""path""" )
else:
_SCREAMING_SNAKE_CASE = pa.array([None] * len(A__ ) , type=pa.string() )
_SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(A__ , self.pa_type )
def UpperCamelCase ( self , A__ ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(A__ ):
with xopen(A__ , """rb""" ) as f:
_SCREAMING_SNAKE_CASE = f.read()
return bytes_
_SCREAMING_SNAKE_CASE = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
_SCREAMING_SNAKE_CASE = pa.array(
[os.path.basename(A__ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
_SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(A__ , self.pa_type )
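# Round-trip sketch of the in-memory WAV path used by the encode/decode methods
# above; the sampling rate and sine waveform are arbitrary example values.
from io import BytesIO

import numpy as np
import soundfile as sf

sr = 16_000
waveform = np.sin(2 * np.pi * 440 * np.arange(sr) / sr).astype(np.float32)  # 1 s of A4
buffer = BytesIO()
sf.write(buffer, waveform, sr, format="wav")
decoded, decoded_sr = sf.read(BytesIO(buffer.getvalue()))
assert decoded_sr == sr and decoded.shape == waveform.shape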
| 0 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def lowerCAmelCase_ ( ) -> List[Any]:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def lowerCAmelCase_ ( ) -> int:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def lowerCAmelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head("""https://huggingface.co""" )
| 0 | 1 |
'''simple docstring'''
from math import sqrt
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 0
for i in range(1 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) ):
if n % i == 0 and i != sqrt(SCREAMING_SNAKE_CASE_ ):
total += i + n // i
elif i == sqrt(SCREAMING_SNAKE_CASE_ ):
total += i
return total - n
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = 1_00_00 ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = sum(
i
for i in range(1 , SCREAMING_SNAKE_CASE_ )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
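# Worked example: 220 and 284 are the classic amicable pair counted by
# `solution` above; a standalone aliquot-sum check (brute force for clarity):
def _aliquot_sum(n: int) -> int:
    return sum(i for i in range(1, n) if n % i == 0)

assert _aliquot_sum(220) == 284
assert _aliquot_sum(284) == 220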
| 0 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCAmelCase_ ( ) -> Iterator[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 2
while True:
if is_prime(SCREAMING_SNAKE_CASE_ ):
yield num
num += 1
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = 2_00_00_00 ) -> int:
"""simple docstring"""
    return sum(takewhile(lambda x : x < SCREAMING_SNAKE_CASE_ , prime_generator() ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 0 | 1 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
UpperCamelCase__ : int = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCamelCase__ : str = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
UpperCamelCase__ : Optional[int] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _a (datasets.Metric):
"""simple docstring"""
def UpperCamelCase ( self ) -> Dict:
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
def UpperCamelCase ( self , A__ , A__ , A__ = False , A__ = False , A__ = False , A__ = False , ) -> Dict:
_SCREAMING_SNAKE_CASE = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
_SCREAMING_SNAKE_CASE = [[refs[i] for refs in references] for i in range(A__ )]
_SCREAMING_SNAKE_CASE = TER(
normalized=A__ , no_punct=A__ , asian_support=A__ , case_sensitive=A__ , )
_SCREAMING_SNAKE_CASE = sb_ter.corpus_score(A__ , A__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _a (unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 1_28, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 1_42, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
_SCREAMING_SNAKE_CASE = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 1_28,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 1_42,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(A__ ) , A__ )
def UpperCamelCase ( self ) -> int:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(A__ ) , x.transpose() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(transpose(A__ ) , np.asarray(transpose(A__ ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , np.asarray(transpose(A__ , axes=(1, 2, 0) ) ) ) )
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.reshape(A__ , (4, 3) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.reshape(A__ , (12, 5) ) ) )
@require_torch
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.asarray(reshape(A__ , (4, 3) ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.asarray(reshape(A__ , (12, 5) ) ) ) )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(A__ ) , np.squeeze(A__ ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.squeeze(A__ , axis=2 ) ) )
@require_torch
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(squeeze(A__ ) , np.asarray(squeeze(A__ ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.asarray(squeeze(A__ , axis=2 ) ) ) )
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.expand_dims(A__ , axis=1 ) ) )
@require_torch
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.asarray(expand_dims(A__ , axis=1 ) ) ) )
| 0 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a :
"""simple docstring"""
def __init__( self , A__ , A__=13 , A__=30 , A__=2 , A__=3 , A__=True , A__=True , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=10 , A__=0.02 , A__=3 , A__=0.6 , A__=None , ) -> List[str]:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = patch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = mask_ratio
_SCREAMING_SNAKE_CASE = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
_SCREAMING_SNAKE_CASE = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ) -> List[Any]:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCamelCase ( self , A__ , A__ , A__ ) -> Tuple:
_SCREAMING_SNAKE_CASE = TFViTMAEModel(config=A__ )
_SCREAMING_SNAKE_CASE = model(A__ , training=A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = TFViTMAEForPreTraining(A__ )
_SCREAMING_SNAKE_CASE = model(A__ , training=A__ )
# expected sequence length = num_patches
_SCREAMING_SNAKE_CASE = (self.image_size // self.patch_size) ** 2
_SCREAMING_SNAKE_CASE = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = TFViTMAEForPreTraining(A__ )
_SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE = model(A__ , training=A__ )
_SCREAMING_SNAKE_CASE = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _a (_lowerCamelCase , _lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
SCREAMING_SNAKE_CASE = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self ) -> int:
_SCREAMING_SNAKE_CASE = TFViTMAEModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )
def UpperCamelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def UpperCamelCase ( self ) -> List[Any]:
pass
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A__ , tf.keras.layers.Layer ) )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A__ )
_SCREAMING_SNAKE_CASE = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A__ )
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
# make the mask reproducible
np.random.seed(2 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2 )
_SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A__ )
_SCREAMING_SNAKE_CASE = self._prepare_for_class(A__ , A__ )
_SCREAMING_SNAKE_CASE = model(A__ , noise=A__ )
_SCREAMING_SNAKE_CASE = copy.deepcopy(self._prepare_for_class(A__ , A__ ) )
_SCREAMING_SNAKE_CASE = model(**A__ , noise=A__ )
_SCREAMING_SNAKE_CASE = outputs_dict[0].numpy()
_SCREAMING_SNAKE_CASE = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def UpperCamelCase ( self ) -> Optional[Any]:
# make the mask reproducible
np.random.seed(2 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2 )
_SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(A__ ):
_SCREAMING_SNAKE_CASE = {}
for k, v in inputs_dict.items():
if tf.is_tensor(A__ ):
_SCREAMING_SNAKE_CASE = v.numpy()
else:
_SCREAMING_SNAKE_CASE = np.array(A__ )
return inputs_np_dict
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A__ )
_SCREAMING_SNAKE_CASE = self._prepare_for_class(A__ , A__ )
_SCREAMING_SNAKE_CASE = prepare_numpy_arrays(A__ )
_SCREAMING_SNAKE_CASE = model(A__ , noise=A__ )
_SCREAMING_SNAKE_CASE = model(**A__ , noise=A__ )
self.assert_outputs_same(A__ , A__ )
def UpperCamelCase ( self , A__ , A__ , A__ ) -> Any:
# make masks reproducible
np.random.seed(2 )
_SCREAMING_SNAKE_CASE = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_SCREAMING_SNAKE_CASE = tf_noise
super().check_pt_tf_models(A__ , A__ , A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
# make mask reproducible
np.random.seed(2 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(A__ )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(A__ , A__ ),)
if isinstance(A__ , A__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(A__ , """_keras_serializable""" , A__ )
}
_SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2 )
_SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_SCREAMING_SNAKE_CASE = tf.convert_to_tensor(A__ )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
_SCREAMING_SNAKE_CASE = main_layer_class(A__ )
_SCREAMING_SNAKE_CASE = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_SCREAMING_SNAKE_CASE = tf.keras.Model(A__ , outputs=main_layer(A__ ) )
_SCREAMING_SNAKE_CASE = model(A__ )
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE = os.path.join(A__ , """keras_model.h5""" )
model.save(A__ )
_SCREAMING_SNAKE_CASE = tf.keras.models.load_model(
A__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(A__ , tf.keras.Model )
_SCREAMING_SNAKE_CASE = model(A__ )
self.assert_outputs_same(A__ , A__ )
@slow
def UpperCamelCase ( self ) -> int:
# make mask reproducible
np.random.seed(2 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2 )
_SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A__ )
_SCREAMING_SNAKE_CASE = self._prepare_for_class(A__ , A__ )
_SCREAMING_SNAKE_CASE = model(A__ , noise=A__ )
if model_class.__name__ == "TFViTMAEModel":
_SCREAMING_SNAKE_CASE = outputs.last_hidden_state.numpy()
_SCREAMING_SNAKE_CASE = 0
else:
_SCREAMING_SNAKE_CASE = outputs.logits.numpy()
_SCREAMING_SNAKE_CASE = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A__ , saved_model=A__ )
_SCREAMING_SNAKE_CASE = model_class.from_pretrained(A__ )
_SCREAMING_SNAKE_CASE = model(A__ , noise=A__ )
if model_class.__name__ == "TFViTMAEModel":
_SCREAMING_SNAKE_CASE = after_outputs["""last_hidden_state"""].numpy()
_SCREAMING_SNAKE_CASE = 0
else:
_SCREAMING_SNAKE_CASE = after_outputs["""logits"""].numpy()
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A__ , 1E-5 )
def UpperCamelCase ( self ) -> Dict:
# make mask reproducible
np.random.seed(2 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2 )
_SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A__ )
_SCREAMING_SNAKE_CASE = self._prepare_for_class(A__ , A__ )
_SCREAMING_SNAKE_CASE = model(A__ , noise=A__ )
_SCREAMING_SNAKE_CASE = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(A__ )
_SCREAMING_SNAKE_CASE = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_SCREAMING_SNAKE_CASE = model_class.from_config(model.config )
_SCREAMING_SNAKE_CASE = new_model(A__ ) # Build model
new_model.set_weights(model.get_weights() )
_SCREAMING_SNAKE_CASE = new_model(A__ , noise=A__ )
self.assert_outputs_same(A__ , A__ )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def UpperCamelCase ( self ) -> Any:
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def UpperCamelCase ( self ) -> Tuple:
pass
@slow
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(A__ )
def lowerCAmelCase_ ( ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _a (unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCamelCase ( self ) -> Optional[Any]:
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def UpperCamelCase ( self ) -> List[Any]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_SCREAMING_SNAKE_CASE = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
_SCREAMING_SNAKE_CASE = self.default_image_processor
_SCREAMING_SNAKE_CASE = prepare_img()
_SCREAMING_SNAKE_CASE = image_processor(images=A__ , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_SCREAMING_SNAKE_CASE = ViTMAEConfig()
_SCREAMING_SNAKE_CASE = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_SCREAMING_SNAKE_CASE = np.random.uniform(size=(1, num_patches) )
# forward pass
_SCREAMING_SNAKE_CASE = model(**A__ , noise=A__ )
# verify the logits
_SCREAMING_SNAKE_CASE = tf.convert_to_tensor([1, 1_96, 7_68] )
self.assertEqual(outputs.logits.shape , A__ )
_SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , A__ , atol=1E-4 )
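# Sketch of the random-masking mechanism the `noise` tensor drives in ViTMAE:
# patches are ordered by argsort(noise) and only the lowest-noise fraction is
# kept visible. Numpy illustration; shapes mirror the integration test above.
import numpy as np

num_patches, mask_ratio = 196, 0.75
noise = np.random.uniform(size=(1, num_patches))
ids_shuffle = np.argsort(noise, axis=1)        # ascending: low noise is kept
ids_restore = np.argsort(ids_shuffle, axis=1)  # inverse permutation
len_keep = int(num_patches * (1 - mask_ratio))
ids_keep = ids_shuffle[:, :len_keep]           # indices of visible patches
print(ids_keep.shape)  # (1, 49)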
| 0 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ''
SCREAMING_SNAKE_CASE = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , A__ = None , A__ = None , **A__ , ) -> Optional[int]:
super().__init__(self , **A__ )
_SCREAMING_SNAKE_CASE = repo_info
_SCREAMING_SNAKE_CASE = token
_SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self ) -> Tuple:
if self.dir_cache is None:
_SCREAMING_SNAKE_CASE = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_SCREAMING_SNAKE_CASE = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(A__ ): {"""name""": str(A__ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def UpperCamelCase ( self , A__ , A__ = "rb" , **A__ , ) -> Optional[int]:
if not isinstance(self.repo_info , A__ ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
_SCREAMING_SNAKE_CASE = hf_hub_url(self.repo_info.id , A__ , revision=self.repo_info.sha )
return fsspec.open(
A__ , mode=A__ , headers=get_authentication_headers_for_url(A__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def UpperCamelCase ( self , A__ , **A__ ) -> str:
self._get_dirs()
_SCREAMING_SNAKE_CASE = self._strip_protocol(A__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(A__ )
def UpperCamelCase ( self , A__ , A__=False , **A__ ) -> List[Any]:
self._get_dirs()
_SCREAMING_SNAKE_CASE = PurePosixPath(path.strip("""/""" ) )
_SCREAMING_SNAKE_CASE = {}
for p, f in self.dir_cache.items():
_SCREAMING_SNAKE_CASE = PurePosixPath(p.strip("""/""" ) )
_SCREAMING_SNAKE_CASE = p.parent
if root == path:
_SCREAMING_SNAKE_CASE = f
_SCREAMING_SNAKE_CASE = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 0 | 1 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ : Tuple = logging.get_logger(__name__)
UpperCamelCase__ : int = {"vocab_file": "spiece.model"}
UpperCamelCase__ : Tuple = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
UpperCamelCase__ : Dict = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
UpperCamelCase__ : Dict = "▁"
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , A__ , A__=True , A__=True , A__=False , A__="[CLS]" , A__="[SEP]" , A__="<unk>" , A__="[SEP]" , A__="<pad>" , A__="[CLS]" , A__="[MASK]" , A__ = None , **A__ , ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
_SCREAMING_SNAKE_CASE = (
AddedToken(A__ , lstrip=A__ , rstrip=A__ , normalized=A__ )
if isinstance(A__ , A__ )
else mask_token
)
_SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=A__ , remove_space=A__ , keep_accents=A__ , bos_token=A__ , eos_token=A__ , unk_token=A__ , sep_token=A__ , pad_token=A__ , cls_token=A__ , mask_token=A__ , sp_model_kwargs=self.sp_model_kwargs , **A__ , )
_SCREAMING_SNAKE_CASE = do_lower_case
_SCREAMING_SNAKE_CASE = remove_space
_SCREAMING_SNAKE_CASE = keep_accents
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A__ )
@property
def UpperCamelCase ( self ) -> int:
return len(self.sp_model )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(A__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.__dict__.copy()
_SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self , A__ ) -> str:
_SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase ( self , A__ ) -> Union[str, Any]:
if self.remove_space:
_SCREAMING_SNAKE_CASE = """ """.join(inputs.strip().split() )
else:
_SCREAMING_SNAKE_CASE = inputs
_SCREAMING_SNAKE_CASE = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_SCREAMING_SNAKE_CASE = unicodedata.normalize("""NFKD""" , A__ )
_SCREAMING_SNAKE_CASE = """""".join([c for c in outputs if not unicodedata.combining(A__ )] )
if self.do_lower_case:
_SCREAMING_SNAKE_CASE = outputs.lower()
return outputs
def UpperCamelCase ( self , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.preprocess_text(A__ )
_SCREAMING_SNAKE_CASE = self.sp_model.encode(A__ , out_type=A__ )
_SCREAMING_SNAKE_CASE = []
for piece in pieces:
if len(A__ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_SCREAMING_SNAKE_CASE = self.sp_model.EncodeAsPieces(piece[:-1].replace(A__ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_SCREAMING_SNAKE_CASE = cur_pieces[1:]
else:
_SCREAMING_SNAKE_CASE = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(A__ )
else:
new_pieces.append(A__ )
return new_pieces
def UpperCamelCase ( self , A__ ) -> Tuple:
return self.sp_model.PieceToId(A__ )
def UpperCamelCase ( self , A__ ) -> List[str]:
return self.sp_model.IdToPiece(A__ )
def UpperCamelCase ( self , A__ ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = """"""
_SCREAMING_SNAKE_CASE = False
for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A__ ) + token
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = []
else:
current_sub_tokens.append(A__ )
_SCREAMING_SNAKE_CASE = False
out_string += self.sp_model.decode(A__ )
return out_string.strip()
def UpperCamelCase ( self , A__ , A__ = None ) -> List[int]:
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self , A__ , A__ = None , A__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A__ , token_ids_a=A__ , already_has_special_tokens=A__ )
if token_ids_a is not None:
return [1] + ([0] * len(A__ )) + [1] + ([0] * len(A__ )) + [1]
return [1] + ([0] * len(A__ )) + [1]
def UpperCamelCase ( self , A__ , A__ = None ) -> List[int]:
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self , A__ , A__ = None ) -> Tuple[str]:
if not os.path.isdir(A__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_SCREAMING_SNAKE_CASE = os.path.join(
A__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A__ )
elif not os.path.isfile(self.vocab_file ):
with open(A__ , """wb""" ) as fi:
_SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(A__ )
return (out_vocab_file,)
| 0 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
_SCREAMING_SNAKE_CASE = (
Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , split=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("""path_type""" , [str, list] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
if issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = parquet_path
elif issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = [parquet_path]
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=("train",) ) -> List[str]:
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for split in splits:
_SCREAMING_SNAKE_CASE = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
_SCREAMING_SNAKE_CASE = (
Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_SCREAMING_SNAKE_CASE = ParquetDatasetReader({"""train""": parquet_path} , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
if split:
_SCREAMING_SNAKE_CASE = {split: parquet_path}
else:
_SCREAMING_SNAKE_CASE = """train"""
_SCREAMING_SNAKE_CASE = {"""train""": parquet_path, """test""": parquet_path}
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_SCREAMING_SNAKE_CASE = pq.ParquetFile(tmp_path / """foo.parquet""" )
_SCREAMING_SNAKE_CASE = pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = str(shared_datadir / """test_image_rgb.jpg""" )
_SCREAMING_SNAKE_CASE = {"""image""": [image_path]}
_SCREAMING_SNAKE_CASE = Features({"""image""": Image()} )
_SCREAMING_SNAKE_CASE = Dataset.from_dict(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_SCREAMING_SNAKE_CASE = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=SCREAMING_SNAKE_CASE_ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
assert get_writer_batch_size(SCREAMING_SNAKE_CASE_ ) == expected
| 0 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase__ : List[Any] = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'markuplm'
def __init__( self , A__=3_05_22 , A__=7_68 , A__=12 , A__=12 , A__=30_72 , A__="gelu" , A__=0.1 , A__=0.1 , A__=5_12 , A__=2 , A__=0.02 , A__=1E-12 , A__=0 , A__=0 , A__=2 , A__=2_56 , A__=10_24 , A__=2_16 , A__=10_01 , A__=32 , A__=50 , A__="absolute" , A__=True , A__=None , **A__ , ) -> int:
super().__init__(
pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ , )
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = position_embedding_type
_SCREAMING_SNAKE_CASE = use_cache
_SCREAMING_SNAKE_CASE = classifier_dropout
# additional properties
_SCREAMING_SNAKE_CASE = max_depth
_SCREAMING_SNAKE_CASE = max_xpath_tag_unit_embeddings
_SCREAMING_SNAKE_CASE = max_xpath_subs_unit_embeddings
_SCREAMING_SNAKE_CASE = tag_pad_id
_SCREAMING_SNAKE_CASE = subs_pad_id
_SCREAMING_SNAKE_CASE = xpath_unit_hidden_size
| 0 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError("""multiplicative_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""multiplicative_persistence() does not accept negative values""" )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
while len(SCREAMING_SNAKE_CASE_ ) != 1:
_SCREAMING_SNAKE_CASE = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string]
_SCREAMING_SNAKE_CASE = 1
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
total *= numbers[i]
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
steps += 1
return steps
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError("""additive_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""additive_persistence() does not accept negative values""" )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
while len(SCREAMING_SNAKE_CASE_ ) != 1:
_SCREAMING_SNAKE_CASE = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string]
_SCREAMING_SNAKE_CASE = 0
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
total += numbers[i]
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
steps += 1
return steps
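# Worked examples (added note; per the error messages these are
# multiplicative_persistence and additive_persistence in the unobfuscated source):
#   multiplicative: 217 -> 2*1*7 = 14 -> 1*4 = 4, i.e. 2 steps
#   additive:       199 -> 1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1, i.e. 3 steps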
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> bool:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = F"Input value of [number={number}] must be an integer"
raise TypeError(SCREAMING_SNAKE_CASE_ )
if number < 0:
return False
_SCREAMING_SNAKE_CASE = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
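# Worked examples (added note): the predicate accepts automorphic numbers --
# numbers whose square ends in the number itself, e.g. 76 (76**2 == 5776) --
# and rejects others, e.g. 7 (7**2 == 49 does not end in 7).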
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
UpperCamelCase__ : Tuple = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase__ : Union[str, Any] = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase__ : str = "sshleifer/tiny-mbart"
@require_torch
class _a (_lowerCamelCase):
"""simple docstring"""
def UpperCamelCase ( self , A__=False , A__=None , A__=True , A__=True , A__=True , A__=True , ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=A__ , num_train_epochs=1 , distributed=A__ , extra_args_str=A__ , predict_with_generate=A__ , do_train=A__ , do_eval=A__ , do_predict=A__ , )
_SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history
if not do_eval:
return
_SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()]
_SCREAMING_SNAKE_CASE = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
_SCREAMING_SNAKE_CASE = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , A__ )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def UpperCamelCase ( self ) -> Optional[int]:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def UpperCamelCase ( self ) -> Optional[Any]:
self.run_seqaseq_quick(distributed=A__ )
@require_torch_multi_gpu
def UpperCamelCase ( self ) -> Union[str, Any]:
self.run_seqaseq_quick(distributed=A__ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> Any:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> Tuple:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> str:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=A__ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> List[str]:
self.run_seqaseq_quick(
distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=A__ )
@require_apex
@require_torch_gpu
def UpperCamelCase ( self ) -> Optional[Any]:
        # XXX: apex breaks the trainer if it's run twice (e.g. run_seq2seq.main() from the same
        # program), and it breaks other tests that run from the same pytest worker; therefore, until
        # this is sorted out, it must be run only in an external program, i.e. distributed=True in
        # this test and only under one or more gpus - if we want cpu we will need a special test
        #
        # specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via
        # a 2nd main() call, it botches the future eval.
        #
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def UpperCamelCase ( self , A__ ) -> List[Any]:
        # as each sub-test is slow-ish, split into multiple sub-tests to avoid CI timeout
_SCREAMING_SNAKE_CASE = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
_SCREAMING_SNAKE_CASE = experiments[experiment_id]
_SCREAMING_SNAKE_CASE = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
_SCREAMING_SNAKE_CASE = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**A__ , extra_args_str=data["""extra_args_str"""] )
_SCREAMING_SNAKE_CASE = len(re.findall(A__ , cl.err ) )
self.assertEqual(A__ , data["""n_matches"""] )
@slow
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=10 , distributed=A__ , )
# Check metrics
_SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history
_SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()]
_SCREAMING_SNAKE_CASE = eval_metrics[0]
_SCREAMING_SNAKE_CASE = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , A__ )
# test if do_predict saves generations and metrics
_SCREAMING_SNAKE_CASE = os.listdir(A__ )
_SCREAMING_SNAKE_CASE = {os.path.basename(A__ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def UpperCamelCase ( self ) -> Dict:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(A__ ) -> Tuple[int, float]:
_SCREAMING_SNAKE_CASE = """--skip_memory_metrics 0"""
_SCREAMING_SNAKE_CASE = self.run_trainer(
max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=1 , optim=A__ , distributed=A__ , extra_args_str=A__ , do_eval=A__ , do_predict=A__ , n_gpus_to_use=1 , )
# Check metrics
_SCREAMING_SNAKE_CASE = TrainerState.load_from_json(Path(A__ , """trainer_state.json""" ) ).log_history
_SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
_SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
_SCREAMING_SNAKE_CASE = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
_SCREAMING_SNAKE_CASE = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
_SCREAMING_SNAKE_CASE = gpu_peak_mem_orig + gpu_alloc_mem_orig
_SCREAMING_SNAKE_CASE = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
_SCREAMING_SNAKE_CASE = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between gpus, let's check
        # that we have at least 120MB in savings
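        # Worked numbers (added note): 25e6 params * (8 - 2) bytes saved per param
        # / 2**20 is ~143MB, hence the ~120MB threshold after margin.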
_SCREAMING_SNAKE_CASE = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
A__ , A__ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
A__ , A__ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
A__ , A__ , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ = 3E-3 , A__ = "adafactor" , A__ = False , A__ = None , A__ = 0 , A__ = True , A__ = True , A__ = True , A__ = True , A__ = None , ) -> Dict:
_SCREAMING_SNAKE_CASE = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
_SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE = F"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A__ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(A__ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
_SCREAMING_SNAKE_CASE = F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A__ )}\n ".split()
_SCREAMING_SNAKE_CASE = """
--do_predict
""".split()
_SCREAMING_SNAKE_CASE = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
_SCREAMING_SNAKE_CASE = get_gpu_count()
_SCREAMING_SNAKE_CASE = get_torch_dist_unique_port()
_SCREAMING_SNAKE_CASE = F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
_SCREAMING_SNAKE_CASE = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(A__ , env=self.get_env() )
else:
_SCREAMING_SNAKE_CASE = ["""run_translation.py"""] + args
with patch.object(A__ , """argv""" , A__ ):
main()
return output_dir
| 0 | 1 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
UpperCamelCase__ : Union[str, Any] = namedtuple("covid_data", "cases deaths recovered")
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """//div[@class = \"maincounter-number\"]/span/text()"""
return covid_data(*html.fromstring(requests.get(SCREAMING_SNAKE_CASE_ ).content ).xpath(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ : Tuple = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
| 0 |
'''simple docstring'''
import sys
UpperCamelCase__ : int = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = N ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = -sys.maxsize - 1
for i in range(len(SCREAMING_SNAKE_CASE_ ) - 12 ):
_SCREAMING_SNAKE_CASE = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
_SCREAMING_SNAKE_CASE = product
return largest_product
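# Spot check (added note): for the 1000-digit number above, the maximum product
# of 13 adjacent digits is 23_514_624_000 -- the widely published answer to
# Project Euler problem 8.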
if __name__ == "__main__":
print(f"""{solution() = }""")
| 0 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = 1_00_00_00 ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = {1: 1}
for inputa in range(2 , SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_SCREAMING_SNAKE_CASE = (3 * number) + 1
counter += 1
if inputa not in counters:
_SCREAMING_SNAKE_CASE = counter
if counter > pre_counter:
_SCREAMING_SNAKE_CASE = inputa
_SCREAMING_SNAKE_CASE = counter
return largest_number
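# Worked example (added note): below 10 the longest chain starts at 9 --
# 9 -> 28 -> 14 -> 7 -> 22 -> 11 -> 34 -> 17 -> 52 -> 26 -> 13 -> 40 -> 20
# -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1 -- twenty terms, so solution(10) == 9.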
if __name__ == "__main__":
print(solution(int(input().strip())))
| 0 |
'''simple docstring'''
UpperCamelCase__ : Dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
UpperCamelCase__ : str = {value: key for key, value in encode_dict.items()}
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """"""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("""encode() accepts only letters of the alphabet and spaces""" )
return encoded
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
if set(SCREAMING_SNAKE_CASE_ ) - {"A", "B", " "} != set():
raise Exception("""decode() accepts only 'A', 'B' and spaces""" )
_SCREAMING_SNAKE_CASE = """"""
for word in coded.split():
while len(SCREAMING_SNAKE_CASE_ ) != 0:
decoded += decode_dict[word[:5]]
_SCREAMING_SNAKE_CASE = word[5:]
decoded += " "
return decoded.strip()
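# Round-trip example (added note): each letter maps to a fixed five-character
# A/B code, so "hello" encodes to AABBB AABAA ABABA ABABA ABBAB (concatenated),
# and decoding that string yields "hello" again.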
if __name__ == "__main__":
from doctest import testmod
testmod()
| 0 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False ) -> bool:
"""simple docstring"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_31_70_44_06_46_79_88_73_85_96_19_81 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
_SCREAMING_SNAKE_CASE = [
20_47,
1_37_36_53,
25_32_60_01,
32_15_03_17_51,
2_15_23_02_89_87_47,
3_47_47_49_66_03_83,
3_41_55_00_71_72_83_21,
1,
3_82_51_23_05_65_46_41_30_51,
1,
1,
31_86_65_85_78_34_03_11_51_16_74_61,
3_31_70_44_06_46_79_88_73_85_96_19_81,
]
_SCREAMING_SNAKE_CASE = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(SCREAMING_SNAKE_CASE_ , 1 ):
if n < _p:
# then we have our last prime to check
_SCREAMING_SNAKE_CASE = primes[:idx]
break
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
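    # Worked example (added note): for n = 561, n - 1 = 560 = 35 * 2**4,
    # so d = 35 and s = 4.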
for prime in plist:
_SCREAMING_SNAKE_CASE = False
for r in range(SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = pow(SCREAMING_SNAKE_CASE_ , d * 2**r , SCREAMING_SNAKE_CASE_ )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
_SCREAMING_SNAKE_CASE = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def lowerCAmelCase_ ( ) -> None:
"""simple docstring"""
assert not miller_rabin(5_61 )
assert miller_rabin(5_63 )
# 2047
assert not miller_rabin(83_82_01 )
assert miller_rabin(83_82_07 )
# 1_373_653
assert not miller_rabin(17_31_60_01 )
assert miller_rabin(17_31_60_17 )
# 25_326_001
assert not miller_rabin(30_78_38_66_41 )
assert miller_rabin(30_78_38_66_53 )
# 3_215_031_751
assert not miller_rabin(1_71_30_45_57_48_01 )
assert miller_rabin(1_71_30_45_57_48_19 )
# 2_152_302_898_747
assert not miller_rabin(2_77_97_99_72_83_07 )
assert miller_rabin(2_77_97_99_72_83_27 )
# 3_474_749_660_383
assert not miller_rabin(1_13_85_00_23_90_94_41 )
assert miller_rabin(1_13_85_00_23_90_95_27 )
# 341_550_071_728_321
assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 )
assert miller_rabin(1_27_50_41_01_88_48_80_43_91 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 )
assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 )
assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = emb.weight.shape
_SCREAMING_SNAKE_CASE = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = emb.weight.data
return lin_layer
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )
_SCREAMING_SNAKE_CASE = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
_SCREAMING_SNAKE_CASE = mam_aaa["""model"""]
remove_ignore_keys_(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = state_dict["""encoder.embed_tokens.weight"""].shape[0]
_SCREAMING_SNAKE_CASE = MaMaaaConfig(
vocab_size=SCREAMING_SNAKE_CASE_ , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
_SCREAMING_SNAKE_CASE = state_dict["""decoder.embed_tokens.weight"""]
_SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
model.model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.shared )
return model
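# Minimal illustration (added sketch; demo tensors only): the linear-from-embedding
# helper above ties the output projection to the embedding by sharing storage.
_DEMO_EMB = nn.Embedding(10, 4)
_DEMO_LIN = nn.Linear(4, 10, bias=False)
_DEMO_LIN.weight.data = _DEMO_EMB.weight.data
assert _DEMO_LIN.weight.data_ptr() == _DEMO_EMB.weight.data_ptr()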
if __name__ == "__main__":
UpperCamelCase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
UpperCamelCase__ : List[str] = parser.parse_args()
 UpperCamelCase__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 0 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _a (_lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = KandinskyVaaPipeline
SCREAMING_SNAKE_CASE = [
'image_embeds',
'negative_image_embeds',
]
SCREAMING_SNAKE_CASE = ['image_embeds', 'negative_image_embeds']
SCREAMING_SNAKE_CASE = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
SCREAMING_SNAKE_CASE = False
@property
def UpperCamelCase ( self ) -> Dict:
return 32
@property
def UpperCamelCase ( self ) -> List[Any]:
return 32
@property
def UpperCamelCase ( self ) -> List[str]:
return self.time_input_dim
@property
def UpperCamelCase ( self ) -> List[Any]:
return self.time_input_dim * 4
@property
def UpperCamelCase ( self ) -> Any:
return 1_00
@property
def UpperCamelCase ( self ) -> Any:
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = {
"""in_channels""": 4,
            # Out channels is double the in channels because the model predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_SCREAMING_SNAKE_CASE = UNetaDConditionModel(**A__ )
return model
@property
def UpperCamelCase ( self ) -> List[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.dummy_unet
_SCREAMING_SNAKE_CASE = self.dummy_movq
_SCREAMING_SNAKE_CASE = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=A__ , set_alpha_to_one=A__ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=A__ , )
_SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCamelCase ( self , A__ , A__=0 ) -> Tuple:
_SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A__ ) ).to(A__ )
_SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
A__ )
if str(A__ ).startswith("""mps""" ):
_SCREAMING_SNAKE_CASE = torch.manual_seed(A__ )
else:
_SCREAMING_SNAKE_CASE = torch.Generator(device=A__ ).manual_seed(A__ )
_SCREAMING_SNAKE_CASE = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = """cpu"""
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = self.pipeline_class(**A__ )
_SCREAMING_SNAKE_CASE = pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
_SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(A__ ) )
_SCREAMING_SNAKE_CASE = output.images
_SCREAMING_SNAKE_CASE = pipe(
**self.get_dummy_inputs(A__ ) , return_dict=A__ , )[0]
_SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE = np.array(
[0.623_7976, 1.0, 0.3644_1332, 1.0, 0.7063_9634, 0.2987_7186, 0.8565_2125, 0.521_6843, 0.5445_4046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _a (unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy""" )
_SCREAMING_SNAKE_CASE = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(A__ )
_SCREAMING_SNAKE_CASE = KandinskyVaaPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
_SCREAMING_SNAKE_CASE = pipeline.to(A__ )
pipeline.set_progress_bar_config(disable=A__ )
_SCREAMING_SNAKE_CASE = """red cat, 4k photo"""
_SCREAMING_SNAKE_CASE = torch.Generator(device="""cuda""" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = pipe_prior(
A__ , generator=A__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_SCREAMING_SNAKE_CASE = torch.Generator(device="""cuda""" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipeline(
image_embeds=A__ , negative_image_embeds=A__ , generator=A__ , num_inference_steps=1_00 , output_type="""np""" , )
_SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(A__ , A__ )
| 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase__ : str = {
"configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
"tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = [
"CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
"CanineForMultipleChoice",
"CanineForQuestionAnswering",
"CanineForSequenceClassification",
"CanineForTokenClassification",
"CanineLayer",
"CanineModel",
"CaninePreTrainedModel",
"load_tf_weights_in_canine",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 | 1 |
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
UpperCamelCase__ : str = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class _a :
"""simple docstring"""
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = _str_to_version_tuple(self.version_str )
def __repr__( self ) -> Optional[Any]:
return F"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
def UpperCamelCase ( self ) -> Any:
return self.major, self.minor, self.patch
def UpperCamelCase ( self , A__ ) -> Dict:
if isinstance(A__ , A__ ):
return Version(A__ )
elif isinstance(A__ , A__ ):
return other
raise TypeError(F"{other} (type {type(A__ )}) cannot be compared to version." )
def __eq__( self , A__ ) -> Union[str, Any]:
try:
_SCREAMING_SNAKE_CASE = self._validate_operand(A__ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , A__ ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = self._validate_operand(A__ )
return self.tuple < other.tuple
def __hash__( self ) -> List[str]:
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def UpperCamelCase ( cls , A__ ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def UpperCamelCase ( self ) -> str:
return self.version_str
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = _VERSION_REG.match(SCREAMING_SNAKE_CASE_ )
if not res:
raise ValueError(F"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits." )
return tuple(int(SCREAMING_SNAKE_CASE_ ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
"""simple docstring"""
return ".".join(str(SCREAMING_SNAKE_CASE_ ) for v in version_tuple )
| 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE = 'ChineseCLIPImageProcessor'
SCREAMING_SNAKE_CASE = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self , A__=None , A__=None , **A__ ) -> int:
_SCREAMING_SNAKE_CASE = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , A__ , )
_SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""" )
_SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(A__ , A__ )
_SCREAMING_SNAKE_CASE = self.image_processor
def __call__( self , A__=None , A__=None , A__=None , **A__ ) -> Optional[int]:
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_SCREAMING_SNAKE_CASE = self.tokenizer(A__ , return_tensors=A__ , **A__ )
if images is not None:
_SCREAMING_SNAKE_CASE = self.image_processor(A__ , return_tensors=A__ , **A__ )
if text is not None and images is not None:
_SCREAMING_SNAKE_CASE = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A__ ) , tensor_type=A__ )
def UpperCamelCase ( self , *A__ , **A__ ) -> Dict:
return self.tokenizer.batch_decode(*A__ , **A__ )
def UpperCamelCase ( self , *A__ , **A__ ) -> Optional[Any]:
return self.tokenizer.decode(*A__ , **A__ )
@property
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase ( self ) -> Optional[int]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , A__ , )
return self.image_processor_class
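    # Typical usage (added sketch; model id assumed for illustration):
    #   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    #   inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")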
| 0 | 1 |
'''simple docstring'''
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , *A__ , A__=None , A__=None , **A__ ) -> Union[str, Any]:
super().__init__(*A__ , **A__ )
_SCREAMING_SNAKE_CASE = eval_examples
_SCREAMING_SNAKE_CASE = post_process_function
def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__ = "eval" ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.eval_dataset if eval_dataset is None else eval_dataset
_SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ )
_SCREAMING_SNAKE_CASE = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation; we will do it in the loop here.
_SCREAMING_SNAKE_CASE = self.compute_metrics
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_SCREAMING_SNAKE_CASE = time.time()
try:
_SCREAMING_SNAKE_CASE = eval_loop(
A__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , metric_key_prefix=A__ , )
finally:
_SCREAMING_SNAKE_CASE = compute_metrics
_SCREAMING_SNAKE_CASE = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
A__ , A__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
_SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions )
_SCREAMING_SNAKE_CASE = self.compute_metrics(A__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
_SCREAMING_SNAKE_CASE = metrics.pop(A__ )
metrics.update(output.metrics )
else:
_SCREAMING_SNAKE_CASE = output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(A__ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_SCREAMING_SNAKE_CASE = self.callback_handler.on_evaluate(self.args , self.state , self.control , A__ )
return metrics
def UpperCamelCase ( self , A__ , A__ , A__=None , A__ = "test" ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.get_test_dataloader(A__ )
        # Temporarily disable metric computation; we will do it in the loop here.
_SCREAMING_SNAKE_CASE = self.compute_metrics
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_SCREAMING_SNAKE_CASE = time.time()
try:
_SCREAMING_SNAKE_CASE = eval_loop(
A__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , metric_key_prefix=A__ , )
finally:
_SCREAMING_SNAKE_CASE = compute_metrics
_SCREAMING_SNAKE_CASE = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
A__ , A__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
_SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions , """predict""" )
_SCREAMING_SNAKE_CASE = self.compute_metrics(A__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
_SCREAMING_SNAKE_CASE = metrics.pop(A__ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A__ )
| 0 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCamelCase__ : List[str] = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
UpperCamelCase__ : List[Any] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
UpperCamelCase__ : Any = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _a (datasets.Metric):
"""simple docstring"""
def UpperCamelCase ( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def UpperCamelCase ( self , A__ , A__ , A__=None ) -> List[str]:
return {
"matthews_correlation": float(matthews_corrcoef(A__ , A__ , sample_weight=A__ ) ),
}
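# Minimal usage sketch, mirroring the docstring examples above (assumes the legacy
# `datasets.load_metric` API shown in this file):
#   metric = datasets.load_metric("matthews_correlation")
#   results = metric.compute(references=[1, 3, 2, 0, 3, 2], predictions=[1, 2, 2, 0, 3, 3])
#   round(results["matthews_correlation"], 2)  # -> 0.54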
| 0 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 3_84
_SCREAMING_SNAKE_CASE = 7
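# Pick the Swin backbone hyper-parameters (embedding dim, depths, attention heads)
# from the size tag embedded in the model name.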
if "tiny" in model_name:
_SCREAMING_SNAKE_CASE = 96
_SCREAMING_SNAKE_CASE = (2, 2, 6, 2)
_SCREAMING_SNAKE_CASE = (3, 6, 12, 24)
elif "small" in model_name:
_SCREAMING_SNAKE_CASE = 96
_SCREAMING_SNAKE_CASE = (2, 2, 18, 2)
_SCREAMING_SNAKE_CASE = (3, 6, 12, 24)
elif "base" in model_name:
_SCREAMING_SNAKE_CASE = 1_28
_SCREAMING_SNAKE_CASE = (2, 2, 18, 2)
_SCREAMING_SNAKE_CASE = (4, 8, 16, 32)
_SCREAMING_SNAKE_CASE = 12
_SCREAMING_SNAKE_CASE = 5_12
elif "large" in model_name:
_SCREAMING_SNAKE_CASE = 1_92
_SCREAMING_SNAKE_CASE = (2, 2, 18, 2)
_SCREAMING_SNAKE_CASE = (6, 12, 24, 48)
_SCREAMING_SNAKE_CASE = 12
_SCREAMING_SNAKE_CASE = 7_68
# set label information
_SCREAMING_SNAKE_CASE = 1_50
_SCREAMING_SNAKE_CASE = """huggingface/label-files"""
_SCREAMING_SNAKE_CASE = """ade20k-id2label.json"""
_SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" ) , """r""" ) )
_SCREAMING_SNAKE_CASE = {int(k ): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE = SwinConfig(
embed_dim=SCREAMING_SNAKE_CASE_ , depths=SCREAMING_SNAKE_CASE_ , num_heads=SCREAMING_SNAKE_CASE_ , window_size=SCREAMING_SNAKE_CASE_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
_SCREAMING_SNAKE_CASE = UperNetConfig(
backbone_config=SCREAMING_SNAKE_CASE_ , auxiliary_in_channels=SCREAMING_SNAKE_CASE_ , num_labels=SCREAMING_SNAKE_CASE_ , id2label=SCREAMING_SNAKE_CASE_ , label2id=SCREAMING_SNAKE_CASE_ , )
return config
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Any:
"""simple docstring"""
rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm1.weight", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm1.bias", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", F"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", F"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm2.weight", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm2.bias", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", F"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", F"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", F"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", F"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.stages.{i}.downsample.reduction.weight", F"backbone.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.stages.{i}.downsample.norm.weight", F"backbone.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.stages.{i}.downsample.norm.bias", F"backbone.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = dct.pop(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = val
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_SCREAMING_SNAKE_CASE = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_SCREAMING_SNAKE_CASE = state_dict.pop(F"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" )
_SCREAMING_SNAKE_CASE = state_dict.pop(F"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
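# Rows [0, dim) of the fused qkv matrix hold the query projection, rows
# [dim, 2*dim) the key projection, and rows [2*dim, 3*dim) the value projection.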
_SCREAMING_SNAKE_CASE = in_proj_weight[:dim, :]
_SCREAMING_SNAKE_CASE = in_proj_bias[:dim]
_SCREAMING_SNAKE_CASE = in_proj_weight[dim : dim * 2, :]
_SCREAMING_SNAKE_CASE = in_proj_bias[dim : dim * 2]
_SCREAMING_SNAKE_CASE = in_proj_weight[-dim:, :]
_SCREAMING_SNAKE_CASE = in_proj_bias[-dim:]
# fmt: on
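# The four helpers below regroup blocks of 4 channels with a [0, 2, 1, 3]
# permutation to convert Swin's patch-merging reduction/norm tensors between
# the original checkpoint layout and the HF layout.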
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = x.shape
_SCREAMING_SNAKE_CASE = x.reshape(SCREAMING_SNAKE_CASE_ , 4 , in_channel // 4 )
_SCREAMING_SNAKE_CASE = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return x
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = x.shape
_SCREAMING_SNAKE_CASE = x.reshape(SCREAMING_SNAKE_CASE_ , in_channel // 4 , 4 )
_SCREAMING_SNAKE_CASE = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return x
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = x.shape[0]
_SCREAMING_SNAKE_CASE = x.reshape(4 , in_channel // 4 )
_SCREAMING_SNAKE_CASE = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE_ )
return x
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = x.shape[0]
_SCREAMING_SNAKE_CASE = x.reshape(in_channel // 4 , 4 )
_SCREAMING_SNAKE_CASE = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE_ )
return x
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
_SCREAMING_SNAKE_CASE = model_name_to_url[model_name]
_SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" , file_name=SCREAMING_SNAKE_CASE_ )[
"""state_dict"""
]
for name, param in state_dict.items():
print(SCREAMING_SNAKE_CASE_ , param.shape )
_SCREAMING_SNAKE_CASE = get_upernet_config(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = UperNetForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_SCREAMING_SNAKE_CASE = state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "bn" in key:
_SCREAMING_SNAKE_CASE = key.replace("""bn""" , """batch_norm""" )
_SCREAMING_SNAKE_CASE = val
# rename keys
_SCREAMING_SNAKE_CASE = create_rename_keys(SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
_SCREAMING_SNAKE_CASE = reverse_correct_unfold_reduction_order(SCREAMING_SNAKE_CASE_ )
if "norm" in key:
_SCREAMING_SNAKE_CASE = reverse_correct_unfold_norm_order(SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# verify on image
_SCREAMING_SNAKE_CASE = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
_SCREAMING_SNAKE_CASE = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ).convert("""RGB""" )
_SCREAMING_SNAKE_CASE = SegformerImageProcessor()
_SCREAMING_SNAKE_CASE = processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = outputs.logits
print(logits.shape )
print("""First values of logits:""" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
_SCREAMING_SNAKE_CASE = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
_SCREAMING_SNAKE_CASE = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
_SCREAMING_SNAKE_CASE = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
_SCREAMING_SNAKE_CASE = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print(F"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(F"openmmlab/{model_name}" )
processor.push_to_hub(F"openmmlab/{model_name}" )
if __name__ == "__main__":
UpperCamelCase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[f"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase__ : Union[str, Any] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 0 |
'''simple docstring'''
from __future__ import annotations
def print_distance( distance , src ) -> None:
"""simple docstring"""
print(F"Vertex\tShortest Distance from vertex {src}" )
for i, d in enumerate(distance ):
print(F"{i}\t\t{d}" )
def check_negative_cycle( graph , distance , edge_count ) -> bool:
"""simple docstring"""
for j in range(edge_count ):
u , v , w = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
return True
return False
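# Bellman-Ford: relax every edge (vertex_count - 1) times; if a further
# relaxation still lowers a distance, the graph contains a negative-weight cycle.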
def bellman_ford( graph , vertex_count , edge_count , src ) -> list[float]:
"""simple docstring"""
distance = [float("""inf""" )] * vertex_count
distance[src] = 0.0
for _ in range(vertex_count - 1 ):
for j in range(edge_count ):
u , v , w = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
distance[v] = distance[u] + w
negative_cycle_exists = check_negative_cycle(graph , distance , edge_count )
if negative_cycle_exists:
raise Exception("""Negative cycle found""" )
return distance
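# Example: a single edge {"src": 0, "dst": 1, "weight": 5} with vertex_count=2 and
# edge_count=1 gives bellman_ford(graph, 2, 1, 0) == [0.0, 5.0].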
if __name__ == "__main__":
import doctest
doctest.testmod()
V = int(input("Enter number of vertices: ").strip())
E = int(input("Enter number of edges: ").strip())
graph: list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("Edge ", i + 1)
src , dest , weight = (
int(x)
for x in input("Enter source, destination, weight: ").strip().split(" ")
)
graph[i] = {"src": src, "dst": dest, "weight": weight}
source = int(input("\nEnter shortest path source:").strip())
shortest_distance = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 0 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
def __init__( self , unet , scheduler ) -> List[str]:
super().__init__()
self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
def __call__( self , A__ = 1 , A__ = 50 , A__ = None , A__ = "pil" , A__ = True , **A__ , ) -> Union[Tuple, ImagePipelineOutput]:
_SCREAMING_SNAKE_CASE = self.unet.config.sample_size
_SCREAMING_SNAKE_CASE = (batch_size, 3, img_size, img_size)
_SCREAMING_SNAKE_CASE = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
_SCREAMING_SNAKE_CASE = randn_tensor(A__ , generator=A__ , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(A__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
_SCREAMING_SNAKE_CASE = self.scheduler.schedule[t]
_SCREAMING_SNAKE_CASE = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.scheduler.add_noise_to_input(A__ , A__ , generator=A__ )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
_SCREAMING_SNAKE_CASE = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
_SCREAMING_SNAKE_CASE = self.scheduler.step(A__ , A__ , A__ , A__ )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
_SCREAMING_SNAKE_CASE = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
_SCREAMING_SNAKE_CASE = self.scheduler.step_correct(
A__ , A__ , A__ , A__ , step_output.prev_sample , step_output["""derivative"""] , )
_SCREAMING_SNAKE_CASE = step_output.prev_sample
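# Map the final samples from the model's [-1, 1] range back to [0, 1] before
# converting to numpy/PIL.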
_SCREAMING_SNAKE_CASE = (sample / 2 + 0.5).clamp(0 , 1 )
_SCREAMING_SNAKE_CASE = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_SCREAMING_SNAKE_CASE = self.numpy_to_pil(A__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A__ )
| 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class _a :
"""simple docstring"""
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=5_12 , A__=16 , A__=2 , A__=0.02 , A__=3 , A__=4 , A__=None , ) -> int:
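# The tester pins small hyper-parameter values below (ignoring the constructor
# arguments) so the TF graphs built in the unit tests stay tiny.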
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = 13
_SCREAMING_SNAKE_CASE = 7
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = 99
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = 37
_SCREAMING_SNAKE_CASE = """gelu"""
_SCREAMING_SNAKE_CASE = 0.1
_SCREAMING_SNAKE_CASE = 0.1
_SCREAMING_SNAKE_CASE = 5_12
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 0.02
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = TFRoFormerModel(config=A__ )
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE = model(A__ )
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> str:
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = TFRoFormerForCausalLM(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )["""logits"""]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Dict:
_SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFRoFormerForSequenceClassification(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Any:
_SCREAMING_SNAKE_CASE = self.num_choices
_SCREAMING_SNAKE_CASE = TFRoFormerForMultipleChoice(config=A__ )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFRoFormerForTokenClassification(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Tuple:
_SCREAMING_SNAKE_CASE = TFRoFormerForQuestionAnswering(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class _a (_lowerCamelCase , _lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ ) -> str:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = TFRoFormerModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ , hidden_size=37 )
def UpperCamelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A__ )
def UpperCamelCase ( self ) -> int:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A__ )
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A__ )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A__ )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A__ )
@slow
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" )
self.assertIsNotNone(A__ )
@require_tf
class _a (unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE = model(A__ )[0]
# TODO Replace vocab size
_SCREAMING_SNAKE_CASE = 5_00_00
_SCREAMING_SNAKE_CASE = [1, 6, vocab_size]
self.assertEqual(output.shape , A__ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
_SCREAMING_SNAKE_CASE = tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A__ , atol=1E-4 )
@require_tf
class _a (unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 1E-4
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = tf.constant([[4, 10]] )
_SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
_SCREAMING_SNAKE_CASE = emba(input_ids.shape )
_SCREAMING_SNAKE_CASE = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(A__ , A__ , atol=self.tolerance )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
_SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 )
emba([2, 16, 5_12] )
_SCREAMING_SNAKE_CASE = emba.weight[:3, :5]
tf.debugging.assert_near(A__ , A__ , atol=self.tolerance )
@require_tf
class _a (unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 1E-4
def UpperCamelCase ( self ) -> int:
# 2,12,16,64
_SCREAMING_SNAKE_CASE = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 1_00
_SCREAMING_SNAKE_CASE = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 1_00
_SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
_SCREAMING_SNAKE_CASE = embed_positions([2, 16, 7_68] )[None, None, :, :]
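# Apply the rotary position transform to the query/key tensors using the
# sinusoidal table computed above, as RoFormer does inside self-attention.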
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A__ , A__ , A__ )
_SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
_SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A__ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A__ , atol=self.tolerance )
| 0 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=_lowerCamelCase)
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = field(default='question-answering-extractive' , metadata={'include_in_asdict_even_if_is_default': True})
SCREAMING_SNAKE_CASE = Features({'question': Value('string'), 'context': Value('string')})
SCREAMING_SNAKE_CASE = Features(
{
'answers': Sequence(
{
'text': Value('string'),
'answer_start': Value('int32'),
})
})
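# Canonical column names of the extractive-QA schema; `column_mapping` below
# exposes them so datasets with differently named columns can be remapped.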
SCREAMING_SNAKE_CASE = "question"
SCREAMING_SNAKE_CASE = "context"
SCREAMING_SNAKE_CASE = "answers"
@property
def UpperCamelCase ( self ) -> Dict[str, str]:
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
UpperCamelCase__ : int = {"tokenization_herbert": ["HerbertTokenizer"]}
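# Expose tokenizers lazily via _LazyModule; the fast tokenizer is only added
# when the `tokenizers` backend is installed.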
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 | 1 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
UpperCamelCase__ : Tuple = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , *args , eval_examples=None , post_process_function=None , quant_trainer_args=None , **kwargs ) -> Optional[int]:
super().__init__(*args , **kwargs )
_SCREAMING_SNAKE_CASE = eval_examples
_SCREAMING_SNAKE_CASE = post_process_function
_SCREAMING_SNAKE_CASE = quant_trainer_args
_SCREAMING_SNAKE_CASE = 1_28 # default number of calibration samples
def UpperCamelCase ( self , A__=None ) -> Union[str, Any]:
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("""Trainer: calibration requires an calib_dataset.""" )
_SCREAMING_SNAKE_CASE = calib_dataset if calib_dataset is not None else self.calib_dataset
_SCREAMING_SNAKE_CASE = self._remove_unused_columns(A__ , description="""Calibration""" )
return DataLoader(
A__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=A__ , )
def UpperCamelCase ( self , A__=None ) -> str:
_SCREAMING_SNAKE_CASE = self.train_dataset if calib_dataset is None else calib_dataset
_SCREAMING_SNAKE_CASE = self.get_calib_dataloader(A__ )
_SCREAMING_SNAKE_CASE = self.model
quant_trainer.configure_model(A__ , self.quant_trainer_args , calib=A__ )
model.eval()
quant_trainer.enable_calibration(A__ )
logger.info("""***** Running calibration *****""" )
logger.info(F" Num examples = {self.calib_num}" )
logger.info(F" Batch size = {calib_dataloader.batch_size}" )
for step, inputs in enumerate(A__ ):
# Prediction step
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.prediction_step(A__ , A__ , prediction_loss_only=A__ )
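# Stop once roughly `self.calib_num` samples have passed through the model.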
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(A__ , self.quant_trainer_args )
_SCREAMING_SNAKE_CASE = model
def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__ = "eval" ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.eval_dataset if eval_dataset is None else eval_dataset
_SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ )
_SCREAMING_SNAKE_CASE = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_SCREAMING_SNAKE_CASE = self.compute_metrics
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_SCREAMING_SNAKE_CASE = eval_loop(
A__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , )
finally:
_SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions )
_SCREAMING_SNAKE_CASE = self.compute_metrics(A__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
_SCREAMING_SNAKE_CASE = metrics.pop(A__ )
self.log(A__ )
else:
_SCREAMING_SNAKE_CASE = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_SCREAMING_SNAKE_CASE = self.callback_handler.on_evaluate(self.args , self.state , self.control , A__ )
return metrics
def UpperCamelCase ( self , A__ , A__ , A__=None , A__ = "test" ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.get_test_dataloader(A__ )
# Temporarily disable metric computation, we will do it in the loop here.
_SCREAMING_SNAKE_CASE = self.compute_metrics
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_SCREAMING_SNAKE_CASE = eval_loop(
A__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , )
finally:
_SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions , """predict""" )
_SCREAMING_SNAKE_CASE = self.compute_metrics(A__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
_SCREAMING_SNAKE_CASE = metrics.pop(A__ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A__ )
def UpperCamelCase ( self , A__="./" ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.eval_dataset
_SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ )
_SCREAMING_SNAKE_CASE = next(iter(A__ ) )
# saving device - to make it consistent
_SCREAMING_SNAKE_CASE = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
# convert to tuple
_SCREAMING_SNAKE_CASE = tuple(v.to(A__ ) for k, v in batch.items() )
logger.info("""Converting model to be onnx compatible""" )
from pytorch_quantization.nn import TensorQuantizer
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = self.model.to(A__ )
model.eval()
model.float()
_SCREAMING_SNAKE_CASE = model.module if hasattr(A__ , """module""" ) else model
quant_trainer.configure_model(A__ , self.quant_trainer_args )
_SCREAMING_SNAKE_CASE = os.path.join(A__ , """model.onnx""" )
logger.info(F"exporting model to {output_model_file}" )
_SCREAMING_SNAKE_CASE = {0: """batch_size""", 1: """seq_len"""}
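# Mark batch and sequence-length dimensions as dynamic so the exported ONNX
# graph accepts variable input shapes.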
torch.onnx.export(
A__ , A__ , A__ , export_params=A__ , opset_version=13 , do_constant_folding=A__ , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=A__ , )
logger.info("""onnx export finished""" )
| 0 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = XCLIPTextConfig()
# derive patch size from model name
_SCREAMING_SNAKE_CASE = model_name.find("""patch""" )
_SCREAMING_SNAKE_CASE = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
_SCREAMING_SNAKE_CASE = XCLIPVisionConfig(patch_size=SCREAMING_SNAKE_CASE_ , num_frames=SCREAMING_SNAKE_CASE_ )
if "large" in model_name:
_SCREAMING_SNAKE_CASE = 7_68
_SCREAMING_SNAKE_CASE = 30_72
_SCREAMING_SNAKE_CASE = 12
_SCREAMING_SNAKE_CASE = 10_24
_SCREAMING_SNAKE_CASE = 40_96
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 24
_SCREAMING_SNAKE_CASE = 7_68
_SCREAMING_SNAKE_CASE = 30_72
if model_name == "xclip-large-patch14-16-frames":
_SCREAMING_SNAKE_CASE = 3_36
_SCREAMING_SNAKE_CASE = XCLIPConfig.from_text_vision_configs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if "large" in model_name:
_SCREAMING_SNAKE_CASE = 7_68
return config
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
# text encoder
if name == "token_embedding.weight":
_SCREAMING_SNAKE_CASE = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
_SCREAMING_SNAKE_CASE = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
_SCREAMING_SNAKE_CASE = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
_SCREAMING_SNAKE_CASE = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
_SCREAMING_SNAKE_CASE = name.replace("""c_proj""" , """fc2""" )
if name.startswith("""transformer.resblocks""" ):
_SCREAMING_SNAKE_CASE = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
_SCREAMING_SNAKE_CASE = name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
if "ln_final" in name:
_SCREAMING_SNAKE_CASE = name.replace("""ln_final""" , """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
_SCREAMING_SNAKE_CASE = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
if "visual.conv1" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
if "visual.proj" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.proj""" , """visual_projection.weight""" )
if "text_projection" in name:
_SCREAMING_SNAKE_CASE = name.replace("""text_projection""" , """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
_SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
_SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""positional""" , """position""" )
if name.startswith("""mit.resblocks""" ):
_SCREAMING_SNAKE_CASE = name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
_SCREAMING_SNAKE_CASE = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
return name
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_SCREAMING_SNAKE_CASE = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "attn.in_proj" in key:
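# The checkpoint stores attention q/k/v as one fused in_proj tensor; split it
# row-wise into separate query/key/value weights below.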
_SCREAMING_SNAKE_CASE = key.split(""".""" )
if key.startswith("""visual""" ):
_SCREAMING_SNAKE_CASE = key_split[3]
_SCREAMING_SNAKE_CASE = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[:dim, :]
_SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
_SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
_SCREAMING_SNAKE_CASE = val[:dim]
_SCREAMING_SNAKE_CASE = val[dim : dim * 2]
_SCREAMING_SNAKE_CASE = val[-dim:]
else:
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[:dim, :]
_SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
_SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
_SCREAMING_SNAKE_CASE = val[:dim]
_SCREAMING_SNAKE_CASE = val[dim : dim * 2]
_SCREAMING_SNAKE_CASE = val[-dim:]
elif key.startswith("""mit""" ):
_SCREAMING_SNAKE_CASE = key_split[2]
_SCREAMING_SNAKE_CASE = config.vision_config.mit_hidden_size
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[:dim, :]
_SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
_SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
_SCREAMING_SNAKE_CASE = val[:dim]
_SCREAMING_SNAKE_CASE = val[dim : dim * 2]
_SCREAMING_SNAKE_CASE = val[-dim:]
else:
_SCREAMING_SNAKE_CASE = key_split[2]
_SCREAMING_SNAKE_CASE = config.text_config.hidden_size
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[:dim, :]
_SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
_SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
_SCREAMING_SNAKE_CASE = val[:dim]
_SCREAMING_SNAKE_CASE = val[dim : dim * 2]
_SCREAMING_SNAKE_CASE = val[-dim:]
else:
_SCREAMING_SNAKE_CASE = rename_key(SCREAMING_SNAKE_CASE_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
_SCREAMING_SNAKE_CASE = val.T
_SCREAMING_SNAKE_CASE = val
return orig_state_dict
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
if num_frames == 8:
_SCREAMING_SNAKE_CASE = """eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
_SCREAMING_SNAKE_CASE = """eating_spaghetti.npy"""
elif num_frames == 32:
_SCREAMING_SNAKE_CASE = """eating_spaghetti_32_frames.npy"""
_SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" , )
_SCREAMING_SNAKE_CASE = np.load(SCREAMING_SNAKE_CASE_ )
return list(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
_SCREAMING_SNAKE_CASE = model_to_url[model_name]
_SCREAMING_SNAKE_CASE = 8
if "16-frames" in model_name:
_SCREAMING_SNAKE_CASE = 16
elif "shot" in model_name:
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = get_xclip_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ )
model.eval()
if "drive" in checkpoint_url:
_SCREAMING_SNAKE_CASE = """pytorch_model.bin"""
gdown.cached_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""model"""]
else:
_SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ )["""model"""]
_SCREAMING_SNAKE_CASE = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
_SCREAMING_SNAKE_CASE = 3_36 if model_name == """xclip-large-patch14-16-frames""" else 2_24
_SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(size=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
_SCREAMING_SNAKE_CASE = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
_SCREAMING_SNAKE_CASE = XCLIPProcessor(image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = prepare_video(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE_ )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
# Verify outputs
_SCREAMING_SNAKE_CASE = outputs.logits_per_video
_SCREAMING_SNAKE_CASE = logits_per_video.softmax(dim=1 )
print("""Probs:""" , SCREAMING_SNAKE_CASE_ )
# kinetics-400
if model_name == "xclip-base-patch32":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F"Model name {model_name} not supported" )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
slow_tokenizer.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase__ : str = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
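# Hedged follow-up sketch (not part of the original script; the local path is
# illustrative): a checkpoint written via --pytorch_dump_folder_path can be
# reloaded through the standard transformers API for a quick smoke test.
#
#     from transformers import XCLIPModel, XCLIPProcessor
#
#     model = XCLIPModel.from_pretrained("./xclip-base-patch32")
#     processor = XCLIPProcessor.from_pretrained("./xclip-base-patch32")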
| 0 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> float:
"""simple docstring"""
_validate_point(SCREAMING_SNAKE_CASE_ )
_validate_point(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> None:
"""simple docstring"""
if point:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
for item in point:
if not isinstance(SCREAMING_SNAKE_CASE_ , (int, float) ):
_SCREAMING_SNAKE_CASE = (
"""Expected a list of numbers as input, found """
F"{type(SCREAMING_SNAKE_CASE_ ).__name__}"
)
raise TypeError(SCREAMING_SNAKE_CASE_ )
else:
_SCREAMING_SNAKE_CASE = F"Expected a list of numbers as input, found {type(SCREAMING_SNAKE_CASE_ ).__name__}"
raise TypeError(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError("""Missing an input""" )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> float:
"""simple docstring"""
_validate_point(SCREAMING_SNAKE_CASE_ )
_validate_point(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
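# Standalone sketch of the same computation (the obfuscated definitions above
# shadow one another, so the idea is restated on its own): the Manhattan
# distance is the sum of absolute coordinate differences.
def _manhattan_sketch(point_a: list, point_b: list) -> float:
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


if __name__ == "__main__":
    # |1-2| + |1-2| = 2.0 and |1.5-3| + |2-4| = 3.5
    assert _manhattan_sketch([1, 1], [2, 2]) == 2.0
    assert _manhattan_sketch([1.5, 2], [3, 4]) == 3.5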
| 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , A__ , A__ ) -> Any:
_SCREAMING_SNAKE_CASE = params
_SCREAMING_SNAKE_CASE = np.array(A__ )
_SCREAMING_SNAKE_CASE = np.array([len(A__ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , A__ ) -> Dict:
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> Tuple:
return len(self.lengths )
def UpperCamelCase ( self ) -> Dict:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.params.max_model_input_size
_SCREAMING_SNAKE_CASE = self.lengths > max_len
logger.info(F"Splitting {sum(A__ )} too long sequences." )
def divide_chunks(A__ , A__ ):
return [l[i : i + n] for i in range(0 , len(A__ ) , A__ )]
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
if self.params.mlm:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
_SCREAMING_SNAKE_CASE = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
_SCREAMING_SNAKE_CASE = np.insert(A__ , 0 , A__ )
if sub_s[-1] != sep_id:
_SCREAMING_SNAKE_CASE = np.insert(A__ , len(A__ ) , A__ )
assert len(A__ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(A__ )
new_tok_ids.extend(A__ )
new_lengths.extend([len(A__ ) for l in sub_seqs] )
_SCREAMING_SNAKE_CASE = np.array(A__ )
_SCREAMING_SNAKE_CASE = np.array(A__ )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = len(self )
_SCREAMING_SNAKE_CASE = self.lengths > 11
_SCREAMING_SNAKE_CASE = self.token_ids[indices]
_SCREAMING_SNAKE_CASE = self.lengths[indices]
_SCREAMING_SNAKE_CASE = len(self )
logger.info(F"Remove {init_size - new_size} too short (<=11 tokens) sequences." )
def UpperCamelCase ( self ) -> int:
if "unk_token" not in self.params.special_tok_ids:
return
else:
_SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""]
_SCREAMING_SNAKE_CASE = len(self )
_SCREAMING_SNAKE_CASE = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
_SCREAMING_SNAKE_CASE = (unk_occs / self.lengths) < 0.5
_SCREAMING_SNAKE_CASE = self.token_ids[indices]
_SCREAMING_SNAKE_CASE = self.lengths[indices]
_SCREAMING_SNAKE_CASE = len(self )
logger.info(F"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." )
def UpperCamelCase ( self ) -> Optional[Any]:
if not self.params.is_master:
return
logger.info(F"{len(self )} sequences" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def UpperCamelCase ( self , A__ ) -> Any:
_SCREAMING_SNAKE_CASE = [t[0] for t in batch]
_SCREAMING_SNAKE_CASE = [t[1] for t in batch]
assert len(A__ ) == len(A__ )
# Max for paddings
_SCREAMING_SNAKE_CASE = max(A__ )
# Pad token ids
if self.params.mlm:
_SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""pad_token"""]
else:
_SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""]
_SCREAMING_SNAKE_CASE = [list(t.astype(A__ ) ) + [pad_idx] * (max_seq_len_ - len(A__ )) for t in token_ids]
assert len(tk_ ) == len(A__ )
assert all(len(A__ ) == max_seq_len_ for t in tk_ )
_SCREAMING_SNAKE_CASE = torch.tensor(tk_ ) # (bs, max_seq_len_)
_SCREAMING_SNAKE_CASE = torch.tensor(A__ ) # (bs)
return tk_t, lg_t
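# Minimal sketch of the collate step in the method above, decoupled from the
# class (pad id 0 is an illustrative assumption): every sequence in the batch
# is right-padded to the batch maximum before stacking into one tensor.
def _pad_batch_sketch(sequences, pad_idx=0):
    max_len = max(len(s) for s in sequences)
    padded = [list(s) + [pad_idx] * (max_len - len(s)) for s in sequences]
    return torch.tensor(padded), torch.tensor([len(s) for s in sequences])


if __name__ == "__main__":
    tokens, lengths = _pad_batch_sketch([[5, 6, 7], [8, 9]])
    assert tokens.shape == (2, 3) and lengths.tolist() == [3, 2]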
| 0 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
def __init__( self , A__ , A__ ) -> Union[str, Any]:
super().__init__()
self.register_modules(unet=A__ , scheduler=A__ )
@torch.no_grad()
def __call__( self , A__ = 1 , A__ = 20_00 , A__ = None , A__ = "pil" , A__ = True , **A__ , ) -> Union[ImagePipelineOutput, Tuple]:
_SCREAMING_SNAKE_CASE = self.unet.config.sample_size
_SCREAMING_SNAKE_CASE = (batch_size, 3, img_size, img_size)
_SCREAMING_SNAKE_CASE = self.unet
_SCREAMING_SNAKE_CASE = randn_tensor(A__ , generator=A__ ) * self.scheduler.init_noise_sigma
_SCREAMING_SNAKE_CASE = sample.to(self.device )
self.scheduler.set_timesteps(A__ )
self.scheduler.set_sigmas(A__ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_SCREAMING_SNAKE_CASE = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
_SCREAMING_SNAKE_CASE = self.unet(A__ , A__ ).sample
_SCREAMING_SNAKE_CASE = self.scheduler.step_correct(A__ , A__ , generator=A__ ).prev_sample
# prediction step
_SCREAMING_SNAKE_CASE = model(A__ , A__ ).sample
_SCREAMING_SNAKE_CASE = self.scheduler.step_pred(A__ , A__ , A__ , generator=A__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = output.prev_sample, output.prev_sample_mean
_SCREAMING_SNAKE_CASE = sample_mean.clamp(0 , 1 )
_SCREAMING_SNAKE_CASE = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_SCREAMING_SNAKE_CASE = self.numpy_to_pil(A__ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=A__ )
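# Hedged usage sketch (kept as a comment because it downloads a checkpoint;
# "google/ncsnpp-celebahq-256" is one public model known to pair with the
# ScoreSdeVeScheduler):
#
#     from diffusers import ScoreSdeVePipeline
#
#     pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#     image = pipe(num_inference_steps=2000).images[0]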
| 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : Any = "▁"
UpperCamelCase__ : Any = {"vocab_file": "spiece.model"}
UpperCamelCase__ : int = {
"vocab_file": {
"google/reformer-crime-and-punishment": (
"https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
)
}
}
UpperCamelCase__ : Optional[int] = {
"google/reformer-crime-and-punishment": 524_288,
}
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
def __init__( self , A__ , A__="</s>" , A__="<unk>" , A__=[] , A__ = None , **A__ , ) -> None:
_SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=A__ , unk_token=A__ , additional_special_tokens=A__ , sp_model_kwargs=self.sp_model_kwargs , **A__ , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A__ )
@property
def UpperCamelCase ( self ) -> Any:
return self.sp_model.get_piece_size()
def UpperCamelCase ( self ) -> Dict[str, int]:
_SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(A__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> int:
_SCREAMING_SNAKE_CASE = self.__dict__.copy()
_SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self , A__ ) -> str:
_SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase ( self , A__ ) -> List[str]:
return self.sp_model.encode(A__ , out_type=A__ )
def UpperCamelCase ( self , A__ ) -> Union[str, Any]:
return self.sp_model.piece_to_id(A__ )
def UpperCamelCase ( self , A__ ) -> List[Any]:
if index < self.sp_model.get_piece_size():
_SCREAMING_SNAKE_CASE = self.sp_model.IdToPiece(A__ )
return token
def UpperCamelCase ( self , A__ ) -> str:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A__ ) + token
_SCREAMING_SNAKE_CASE = []
else:
current_sub_tokens.append(A__ )
out_string += self.sp_model.decode(A__ )
return out_string.strip()
def UpperCamelCase ( self , A__ , A__ = None ) -> Tuple[str]:
if not os.path.isdir(A__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_SCREAMING_SNAKE_CASE = os.path.join(
A__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A__ )
elif not os.path.isfile(self.vocab_file ):
with open(A__ , """wb""" ) as fi:
_SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(A__ )
return (out_vocab_file,)
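# Hedged usage sketch (kept as a comment because it requires a download):
#
#     from transformers import ReformerTokenizer
#
#     tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#     ids = tok("Crime and Punishment").input_ids
#     text = tok.decode(ids)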
| 0 | 1 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase__ : Optional[Any] = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'efficientformer'
def __init__( self , A__ = [3, 2, 6, 4] , A__ = [48, 96, 2_24, 4_48] , A__ = [True, True, True, True] , A__ = 4_48 , A__ = 32 , A__ = 4 , A__ = 7 , A__ = 5 , A__ = 8 , A__ = 4 , A__ = 0.0 , A__ = 16 , A__ = 3 , A__ = 3 , A__ = 3 , A__ = 2 , A__ = 1 , A__ = 0.0 , A__ = 1 , A__ = True , A__ = True , A__ = 1E-5 , A__ = "gelu" , A__ = 0.02 , A__ = 1E-12 , A__ = 2_24 , A__ = 1E-05 , **A__ , ) -> None:
super().__init__(**A__ )
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = hidden_sizes
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = patch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = depths
_SCREAMING_SNAKE_CASE = mlp_expansion_ratio
_SCREAMING_SNAKE_CASE = downsamples
_SCREAMING_SNAKE_CASE = dim
_SCREAMING_SNAKE_CASE = key_dim
_SCREAMING_SNAKE_CASE = attention_ratio
_SCREAMING_SNAKE_CASE = resolution
_SCREAMING_SNAKE_CASE = pool_size
_SCREAMING_SNAKE_CASE = downsample_patch_size
_SCREAMING_SNAKE_CASE = downsample_stride
_SCREAMING_SNAKE_CASE = downsample_pad
_SCREAMING_SNAKE_CASE = drop_path_rate
_SCREAMING_SNAKE_CASE = num_metaad_blocks
_SCREAMING_SNAKE_CASE = distillation
_SCREAMING_SNAKE_CASE = use_layer_scale
_SCREAMING_SNAKE_CASE = layer_scale_init_value
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = batch_norm_eps
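# Minimal usage sketch (an assumption about intended use, consistent with
# other transformers configs): the defaults above can build a randomly
# initialised model with no download involved.
if __name__ == "__main__":
    from transformers import EfficientFormerConfig, EfficientFormerModel

    cfg = EfficientFormerConfig()
    backbone = EfficientFormerModel(cfg)
    print(cfg.hidden_sizes)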
| 0 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _a (_lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MobileBertTokenizer
SCREAMING_SNAKE_CASE = MobileBertTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = filter_non_english
SCREAMING_SNAKE_CASE = 'google/mobilebert-uncased'
def UpperCamelCase ( self ) -> Any:
super().setUp()
_SCREAMING_SNAKE_CASE = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def UpperCamelCase ( self , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE = """unwanted, running"""
return input_text, output_text
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(A__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [9, 6, 7, 12, 10, 11] )
def UpperCamelCase ( self ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
# With lower casing
_SCREAMING_SNAKE_CASE = self.get_tokenizer(do_lower_case=A__ )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer(do_lower_case=A__ )
_SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
_SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(A__ ):
_SCREAMING_SNAKE_CASE = i
_SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=A__ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def UpperCamelCase ( self ) -> str:
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def UpperCamelCase ( self ) -> Union[str, Any]:
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def UpperCamelCase ( self ) -> Dict:
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(A__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A__ )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A__ , A__ )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def UpperCamelCase ( self ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
_SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(
A__ , return_attention_mask=A__ , return_token_type_ids=A__ , return_offsets_mapping=A__ , add_special_tokens=A__ , )
_SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(A__ , """do_lower_case""" ) else False
_SCREAMING_SNAKE_CASE = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = ["""的""", """人""", """有"""]
_SCREAMING_SNAKE_CASE = """""".join(A__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(A__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A__ , A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(A__ )
# it is expected that only the first Chinese character is not preceded by "##".
_SCREAMING_SNAKE_CASE = [
F"##{token}" if idx != 0 else token for idx, token in enumerate(A__ )
]
self.assertListEqual(A__ , A__ )
self.assertListEqual(A__ , A__ )
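# Standalone sketch of the greedy longest-match-first WordPiece step the tests
# above exercise (a simplified re-implementation for illustration, not the
# library's tokenizer):
def _wordpiece_sketch(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:
            piece = ("##" if start > 0 else "") + word[start:end]
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]
        pieces.append(cur)
        start = end
    return pieces


if __name__ == "__main__":
    vocab = {"un", "##want", "##ed"}
    assert _wordpiece_sketch("unwanted", vocab) == ["un", "##want", "##ed"]
    assert _wordpiece_sketch("running", vocab) == ["[UNK]"]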
| 0 | 1 |
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = FlaxAutoModelForSeqaSeqLM.from_config(config=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""]
    if config.model_type == "t5":
        _SCREAMING_SNAKE_CASE = """SelfAttention"""
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        _SCREAMING_SNAKE_CASE = """LocalSelfAttention"""
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        _SCREAMING_SNAKE_CASE = """TransientGlobalSelfAttention"""
    else:
        raise ValueError(
            """Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"""
            """ attribute with a value from ['local', 'transient-global'].""" )
# Encoder
for layer_index in range(config.num_layers ):
_SCREAMING_SNAKE_CASE = F"layers_{str(SCREAMING_SNAKE_CASE_ )}"
# Self-Attention
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
if split_mlp_wi:
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
_SCREAMING_SNAKE_CASE = flax_model.params["""encoder"""]["""block"""][str(SCREAMING_SNAKE_CASE_ )]["""layer"""]
_SCREAMING_SNAKE_CASE = tax_attention_key
_SCREAMING_SNAKE_CASE = tax_attention_out
_SCREAMING_SNAKE_CASE = tax_attention_query
_SCREAMING_SNAKE_CASE = tax_attention_value
_SCREAMING_SNAKE_CASE = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_SCREAMING_SNAKE_CASE = tax_global_layer_norm
if split_mlp_wi:
_SCREAMING_SNAKE_CASE = tax_mlp_wi_a
_SCREAMING_SNAKE_CASE = tax_mlp_wi_a
else:
_SCREAMING_SNAKE_CASE = tax_mlp_wi
_SCREAMING_SNAKE_CASE = tax_mlp_wo
_SCREAMING_SNAKE_CASE = tax_mlp_layer_norm
_SCREAMING_SNAKE_CASE = flax_model_encoder_layer_block
# Only for layer 0:
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T
_SCREAMING_SNAKE_CASE = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T
_SCREAMING_SNAKE_CASE = tax_encoder_global_rel_embedding
# Assigning
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""]
_SCREAMING_SNAKE_CASE = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_SCREAMING_SNAKE_CASE = F"layers_{str(SCREAMING_SNAKE_CASE_ )}"
# Self-Attention
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
# Layer Normalization
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
"""scale"""
]
# Encoder-Decoder-Attention
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
_SCREAMING_SNAKE_CASE = tax_enc_dec_attention_module["""key"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_enc_dec_attention_module["""out"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_enc_dec_attention_module["""query"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_enc_dec_attention_module["""value"""]["""kernel"""]
# Layer Normalization
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
# MLP
if split_mlp_wi:
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
_SCREAMING_SNAKE_CASE = flax_model.params["""decoder"""]["""block"""][str(SCREAMING_SNAKE_CASE_ )]["""layer"""]
_SCREAMING_SNAKE_CASE = tax_attention_key
_SCREAMING_SNAKE_CASE = tax_attention_out
_SCREAMING_SNAKE_CASE = tax_attention_query
_SCREAMING_SNAKE_CASE = tax_attention_value
_SCREAMING_SNAKE_CASE = tax_pre_attention_layer_norm
_SCREAMING_SNAKE_CASE = tax_enc_dec_attention_key
_SCREAMING_SNAKE_CASE = tax_enc_dec_attention_out
_SCREAMING_SNAKE_CASE = tax_enc_dec_attention_query
_SCREAMING_SNAKE_CASE = tax_enc_dec_attention_value
_SCREAMING_SNAKE_CASE = tax_cross_layer_norm
if split_mlp_wi:
_SCREAMING_SNAKE_CASE = tax_mlp_wi_a
_SCREAMING_SNAKE_CASE = tax_mlp_wi_a
else:
_SCREAMING_SNAKE_CASE = tax_mlp_wi
_SCREAMING_SNAKE_CASE = tax_mlp_wo
        _SCREAMING_SNAKE_CASE = tax_mlp_layer_norm
_SCREAMING_SNAKE_CASE = flax_model_decoder_layer_block
# Decoder Normalization
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""]
_SCREAMING_SNAKE_CASE = txa_decoder_norm
# Only for layer 0:
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T
_SCREAMING_SNAKE_CASE = tax_decoder_rel_embedding
# Token Embeddings
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""token_embedder"""]["""embedding"""]
_SCREAMING_SNAKE_CASE = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
flax_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
UpperCamelCase__ : int = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
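# Hedged follow-up sketch (illustrative, kept as a comment): a checkpoint
# written to --flax_dump_folder_path can be reloaded for a smoke test.
#
#     from transformers import FlaxAutoModelForSeq2SeqLM
#
#     model = FlaxAutoModelForSeq2SeqLM.from_pretrained(flax_dump_folder_path)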
| 0 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
UpperCamelCase__ : Tuple = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , *A__ , A__=None , A__=None , A__=None , **A__ ) -> Optional[int]:
super().__init__(*A__ , **A__ )
_SCREAMING_SNAKE_CASE = eval_examples
_SCREAMING_SNAKE_CASE = post_process_function
_SCREAMING_SNAKE_CASE = quant_trainer_args
_SCREAMING_SNAKE_CASE = 1_28 # default number of calibration samples
def UpperCamelCase ( self , A__=None ) -> Union[str, Any]:
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("""Trainer: calibration requires an calib_dataset.""" )
_SCREAMING_SNAKE_CASE = calib_dataset if calib_dataset is not None else self.calib_dataset
_SCREAMING_SNAKE_CASE = self._remove_unused_columns(A__ , description="""Calibration""" )
return DataLoader(
A__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=A__ , )
def UpperCamelCase ( self , A__=None ) -> str:
_SCREAMING_SNAKE_CASE = self.train_dataset if calib_dataset is None else calib_dataset
_SCREAMING_SNAKE_CASE = self.get_calib_dataloader(A__ )
_SCREAMING_SNAKE_CASE = self.model
quant_trainer.configure_model(A__ , self.quant_trainer_args , calib=A__ )
model.eval()
quant_trainer.enable_calibration(A__ )
logger.info("""***** Running calibration *****""" )
logger.info(F" Num examples = {self.calib_num}" )
logger.info(F" Batch size = {calib_dataloader.batch_size}" )
for step, inputs in enumerate(A__ ):
# Prediction step
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.prediction_step(A__ , A__ , prediction_loss_only=A__ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(A__ , self.quant_trainer_args )
_SCREAMING_SNAKE_CASE = model
def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__ = "eval" ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.eval_dataset if eval_dataset is None else eval_dataset
_SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ )
_SCREAMING_SNAKE_CASE = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_SCREAMING_SNAKE_CASE = self.compute_metrics
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_SCREAMING_SNAKE_CASE = eval_loop(
A__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , )
finally:
_SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions )
_SCREAMING_SNAKE_CASE = self.compute_metrics(A__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
_SCREAMING_SNAKE_CASE = metrics.pop(A__ )
self.log(A__ )
else:
_SCREAMING_SNAKE_CASE = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_SCREAMING_SNAKE_CASE = self.callback_handler.on_evaluate(self.args , self.state , self.control , A__ )
return metrics
def UpperCamelCase ( self , A__ , A__ , A__=None , A__ = "test" ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.get_test_dataloader(A__ )
# Temporarily disable metric computation, we will do it in the loop here.
_SCREAMING_SNAKE_CASE = self.compute_metrics
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_SCREAMING_SNAKE_CASE = eval_loop(
A__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , )
finally:
_SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions , """predict""" )
_SCREAMING_SNAKE_CASE = self.compute_metrics(A__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
_SCREAMING_SNAKE_CASE = metrics.pop(A__ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A__ )
def UpperCamelCase ( self , A__="./" ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.eval_dataset
_SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ )
_SCREAMING_SNAKE_CASE = next(iter(A__ ) )
# saving device - to make it consistent
_SCREAMING_SNAKE_CASE = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
# convert to tuple
_SCREAMING_SNAKE_CASE = tuple(v.to(A__ ) for k, v in batch.items() )
logger.info("""Converting model to be onnx compatible""" )
from pytorch_quantization.nn import TensorQuantizer
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = self.model.to(A__ )
model.eval()
model.float()
_SCREAMING_SNAKE_CASE = model.module if hasattr(A__ , """module""" ) else model
quant_trainer.configure_model(A__ , self.quant_trainer_args )
_SCREAMING_SNAKE_CASE = os.path.join(A__ , """model.onnx""" )
logger.info(F"exporting model to {output_model_file}" )
_SCREAMING_SNAKE_CASE = {0: """batch_size""", 1: """seq_len"""}
torch.onnx.export(
A__ , A__ , A__ , export_params=A__ , opset_version=13 , do_constant_folding=A__ , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=A__ , )
logger.info("""onnx export finished""" )
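# Standalone sketch of the dynamic-axes export performed in save_onnx above,
# shrunk to a toy module (the file name and axis labels are illustrative
# assumptions, not values from the source):
if __name__ == "__main__":
    toy = torch.nn.Linear(4, 2).eval()
    example = torch.randn(1, 4)
    torch.onnx.export(
        toy,
        (example,),
        "toy_model.onnx",
        input_names=["input"],
        output_names=["output"],
        dynamic_axes={"input": {0: "batch_size"}, "output": {0: "batch_size"}},
        opset_version=13,
    )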
| 0 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _a (unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = """laion/clap-htsat-unfused"""
_SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
def UpperCamelCase ( self , **A__ ) -> Optional[int]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **A__ )
def UpperCamelCase ( self , **A__ ) -> int:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **A__ )
def UpperCamelCase ( self ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_feature_extractor()
_SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=A__ , feature_extractor=A__ )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A__ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A__ )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_SCREAMING_SNAKE_CASE = self.get_feature_extractor(do_normalize=A__ , padding_value=1.0 )
_SCREAMING_SNAKE_CASE = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=A__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A__ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.get_feature_extractor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=A__ , feature_extractor=A__ )
_SCREAMING_SNAKE_CASE = floats_list((3, 10_00) )
_SCREAMING_SNAKE_CASE = feature_extractor(A__ , return_tensors="""np""" )
_SCREAMING_SNAKE_CASE = processor(audios=A__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = self.get_feature_extractor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=A__ , feature_extractor=A__ )
_SCREAMING_SNAKE_CASE = """This is a test string"""
_SCREAMING_SNAKE_CASE = processor(text=A__ )
_SCREAMING_SNAKE_CASE = tokenizer(A__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.get_feature_extractor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=A__ , feature_extractor=A__ )
_SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE = processor.batch_decode(A__ )
_SCREAMING_SNAKE_CASE = tokenizer.batch_decode(A__ )
self.assertListEqual(A__ , A__ )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = self.get_feature_extractor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=A__ , feature_extractor=A__ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
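# Hedged usage sketch (kept as a comment because it downloads a checkpoint;
# `raw_waveform` is a placeholder for a 48 kHz mono numpy array):
#
#     from transformers import ClapProcessor
#
#     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#     inputs = processor(text=["a dog barking"], audios=raw_waveform, return_tensors="pt")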
| 0 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
return "".join([hex(SCREAMING_SNAKE_CASE_ )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE_ )] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> bytes:
"""simple docstring"""
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(SCREAMING_SNAKE_CASE_ ) % 2) != 0:
raise ValueError(
"""Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE_ ) <= set("""0123456789ABCDEF""" ):
raise ValueError(
"""Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
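# Standalone cross-check sketch (using the stdlib rather than the shadowed
# obfuscated names above): base16 is just uppercase hex, two digits per byte.
if __name__ == "__main__":
    import base64

    payload = b"Hello World!"
    encoded = base64.b16encode(payload)  # b'48656C6C6F20576F726C6421'
    assert base64.b16decode(encoded) == payload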
| 0 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _a (unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = TextaTextGenerationPipeline(model=A__ , tokenizer=A__ )
return generator, ["Something to write", "Something else"]
def UpperCamelCase ( self , A__ , A__ ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = generator("""Something there""" )
self.assertEqual(A__ , [{"""generated_text""": ANY(A__ )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
_SCREAMING_SNAKE_CASE = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=A__ )
self.assertEqual(
A__ , [
[{"""generated_text""": ANY(A__ )}, {"""generated_text""": ANY(A__ )}],
[{"""generated_text""": ANY(A__ )}, {"""generated_text""": ANY(A__ )}],
] , )
_SCREAMING_SNAKE_CASE = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=A__ )
self.assertEqual(
A__ , [
[{"""generated_text""": ANY(A__ )}, {"""generated_text""": ANY(A__ )}],
[{"""generated_text""": ANY(A__ )}, {"""generated_text""": ANY(A__ )}],
] , )
with self.assertRaises(A__ ):
generator(4 )
@require_torch
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
_SCREAMING_SNAKE_CASE = generator("""Something there""" , do_sample=A__ )
self.assertEqual(A__ , [{"""generated_text""": """"""}] )
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = generator(
"""Something there""" , num_return_sequences=A__ , num_beams=A__ , )
_SCREAMING_SNAKE_CASE = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = generator("""This is a test""" , do_sample=A__ , num_return_sequences=2 , return_tensors=A__ )
self.assertEqual(
A__ , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
_SCREAMING_SNAKE_CASE = generator.model.config.eos_token_id
_SCREAMING_SNAKE_CASE = """<pad>"""
_SCREAMING_SNAKE_CASE = generator(
["""This is a test""", """This is a second test"""] , do_sample=A__ , num_return_sequences=2 , batch_size=2 , return_tensors=A__ , )
self.assertEqual(
A__ , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
_SCREAMING_SNAKE_CASE = generator("""Something there""" , do_sample=A__ )
self.assertEqual(A__ , [{"""generated_text""": """"""}] )
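# Hedged usage sketch (kept as a comment because it downloads a checkpoint):
#
#     from transformers import pipeline
#
#     generator = pipeline("text2text-generation", model="t5-small")
#     print(generator("translate English to German: How old are you?"))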
| 0 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def lowerCAmelCase_ ( ) -> List[Any]:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def lowerCAmelCase_ ( ) -> int:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def lowerCAmelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
http_head("""https://huggingface.co""" )
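# Standalone sketch of the failure modes the simulations above emulate (this
# one actually touches the network, so the outcome depends on connectivity):
if __name__ == "__main__":
    try:
        requests.request("GET", "https://huggingface.co", timeout=1.0)
    except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError) as err:
        print(f"offline-style failure: {type(err).__name__}")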
| 0 | 1 |
'''simple docstring'''
from __future__ import annotations
class _a :
"""simple docstring"""
def __init__( self , A__ ) -> None:
_SCREAMING_SNAKE_CASE = data
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> None: # In Order traversal of the tree
"""simple docstring"""
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> bool:
"""simple docstring"""
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def lowerCAmelCase_ ( ) -> None: # Main function for testing.
"""simple docstring"""
_SCREAMING_SNAKE_CASE = Node(1 )
_SCREAMING_SNAKE_CASE = Node(2 )
_SCREAMING_SNAKE_CASE = Node(3 )
_SCREAMING_SNAKE_CASE = Node(4 )
_SCREAMING_SNAKE_CASE = Node(5 )
_SCREAMING_SNAKE_CASE = Node(6 )
_SCREAMING_SNAKE_CASE = Node(7 )
_SCREAMING_SNAKE_CASE = Node(8 )
_SCREAMING_SNAKE_CASE = Node(9 )
print(is_full_binary_tree(SCREAMING_SNAKE_CASE_ ) )
print(depth_of_tree(SCREAMING_SNAKE_CASE_ ) )
print("""Tree is: """ )
display(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
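# Standalone sketch (the tree helpers above shadow one another under the
# obfuscated naming, so the depth recursion is restated here on its own):
class _SketchNode:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right


def _depth_sketch(node):
    return 1 + max(_depth_sketch(node.left), _depth_sketch(node.right)) if node else 0


if __name__ == "__main__":
    # a chain of three nodes has depth 3
    assert _depth_sketch(_SketchNode(1, _SketchNode(2, _SketchNode(3)))) == 3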
| 0 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCAmelCase_ ( ) -> Iterator[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 2
while True:
if is_prime(SCREAMING_SNAKE_CASE_ ):
yield num
num += 1
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = 2_00_00_00 ) -> int:
"""simple docstring"""
return sum(takewhile(lambda SCREAMING_SNAKE_CASE_ : x < n , prime_generator() ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
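# Small cross-check sketch for the 6k +/- 1 primality test above, against
# brute force on a tiny range (self-contained, avoids the shadowed names):
def _is_prime_brute(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, n))


if __name__ == "__main__":
    # the sum of primes below 10 is 2 + 3 + 5 + 7 = 17
    assert sum(p for p in range(10) if _is_prime_brute(p)) == 17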
| 0 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a :
"""simple docstring"""
def __init__( self , A__ , A__=13 , A__=30 , A__=2 , A__=3 , A__=True , A__=True , A__=32 , A__=5 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=10 , A__=0.02 , A__=None , ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = patch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
_SCREAMING_SNAKE_CASE = num_patches + 1
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ) -> Optional[Any]:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def UpperCamelCase ( self , A__ , A__ , A__ ) -> List[Any]:
_SCREAMING_SNAKE_CASE = ViTMSNModel(config=A__ )
model.to(A__ )
model.eval()
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self , A__ , A__ , A__ ) -> int:
_SCREAMING_SNAKE_CASE = self.type_sequence_label_size
_SCREAMING_SNAKE_CASE = ViTMSNForImageClassification(A__ )
model.to(A__ )
model.eval()
_SCREAMING_SNAKE_CASE = model(A__ , labels=A__ )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = ViTMSNForImageClassification(A__ )
model.to(A__ )
model.eval()
_SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _a (_lowerCamelCase , _lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = (
{'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = ViTMSNModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )
def UpperCamelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def UpperCamelCase ( self ) -> Dict:
pass
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A__ , nn.Linear ) )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A__ )
_SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A__ )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A__ )
@slow
def UpperCamelCase ( self ) -> Optional[Any]:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = ViTMSNModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
def lowerCAmelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _a (unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCamelCase ( self ) -> int:
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def UpperCamelCase ( self ) -> str:
torch.manual_seed(2 )
_SCREAMING_SNAKE_CASE = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(A__ )
_SCREAMING_SNAKE_CASE = self.default_image_processor
_SCREAMING_SNAKE_CASE = prepare_img()
_SCREAMING_SNAKE_CASE = image_processor(images=A__ , return_tensors="""pt""" ).to(A__ )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**A__ )
# verify the logits
_SCREAMING_SNAKE_CASE = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , A__ )
_SCREAMING_SNAKE_CASE = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(A__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A__ , atol=1E-4 ) )
| 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _a (unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 1_28, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 1_42, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
_SCREAMING_SNAKE_CASE = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 1_28,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 1_42,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(A__ ) , A__ )
def UpperCamelCase ( self ) -> int:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(A__ ) , x.transpose() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(transpose(A__ ) , np.asarray(transpose(A__ ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , np.asarray(transpose(A__ , axes=(1, 2, 0) ) ) ) )
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.reshape(A__ , (4, 3) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.reshape(A__ , (12, 5) ) ) )
@require_torch
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.asarray(reshape(A__ , (4, 3) ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.asarray(reshape(A__ , (12, 5) ) ) ) )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(A__ ) , np.squeeze(A__ ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.squeeze(A__ , axis=2 ) ) )
@require_torch
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(squeeze(A__ ) , np.asarray(squeeze(A__ ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.asarray(squeeze(A__ , axis=2 ) ) ) )
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.expand_dims(A__ , axis=1 ) ) )
@require_torch
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.asarray(expand_dims(A__ , axis=1 ) ) ) )
| 0 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _a (metaclass=_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ['note_seq']
def __init__( self , *A__ , **A__ ) -> int:
requires_backends(self , ["""note_seq"""] )
@classmethod
def UpperCamelCase ( cls , *A__ , **A__ ) -> Union[str, Any]:
requires_backends(cls , ["""note_seq"""] )
@classmethod
def UpperCamelCase ( cls , *A__ , **A__ ) -> int:
requires_backends(cls , ["""note_seq"""] )
| 0 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ''
SCREAMING_SNAKE_CASE = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , A__ = None , A__ = None , **A__ , ) -> Optional[int]:
super().__init__(self , **A__ )
_SCREAMING_SNAKE_CASE = repo_info
_SCREAMING_SNAKE_CASE = token
_SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self ) -> Tuple:
if self.dir_cache is None:
_SCREAMING_SNAKE_CASE = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_SCREAMING_SNAKE_CASE = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(A__ ): {"""name""": str(A__ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def UpperCamelCase ( self , A__ , A__ = "rb" , **A__ , ) -> Optional[int]:
if not isinstance(self.repo_info , A__ ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
_SCREAMING_SNAKE_CASE = hf_hub_url(self.repo_info.id , A__ , revision=self.repo_info.sha )
return fsspec.open(
A__ , mode=A__ , headers=get_authentication_headers_for_url(A__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def UpperCamelCase ( self , A__ , **A__ ) -> str:
self._get_dirs()
_SCREAMING_SNAKE_CASE = self._strip_protocol(A__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(A__ )
def UpperCamelCase ( self , A__ , A__=False , **A__ ) -> List[Any]:
self._get_dirs()
_SCREAMING_SNAKE_CASE = PurePosixPath(path.strip("""/""" ) )
_SCREAMING_SNAKE_CASE = {}
for p, f in self.dir_cache.items():
_SCREAMING_SNAKE_CASE = PurePosixPath(p.strip("""/""" ) )
_SCREAMING_SNAKE_CASE = p.parent
if root == path:
_SCREAMING_SNAKE_CASE = f
_SCREAMING_SNAKE_CASE = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 0 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , int ):
raise TypeError("""only integers accepted as input""" )
else:
_SCREAMING_SNAKE_CASE = str(abs(SCREAMING_SNAKE_CASE_ ) )
_SCREAMING_SNAKE_CASE = [list(num_str ) for char in range(len(num_str ) )]
for index in range(len(num_str ) ):
num_transpositions[index].pop(index )
return max(
int("""""".join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 0 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE_ , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
_SCREAMING_SNAKE_CASE = (
Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , split=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
if issubclass(SCREAMING_SNAKE_CASE_ , str ):
_SCREAMING_SNAKE_CASE = parquet_path
elif issubclass(SCREAMING_SNAKE_CASE_ , list ):
_SCREAMING_SNAKE_CASE = [parquet_path]
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=("train",) ) -> List[str]:
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE_ , DatasetDict )
for split in splits:
_SCREAMING_SNAKE_CASE = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
_SCREAMING_SNAKE_CASE = (
Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_SCREAMING_SNAKE_CASE = ParquetDatasetReader({"""train""": parquet_path} , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
if split:
_SCREAMING_SNAKE_CASE = {split: parquet_path}
else:
_SCREAMING_SNAKE_CASE = """train"""
_SCREAMING_SNAKE_CASE = {"""train""": parquet_path, """test""": parquet_path}
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_SCREAMING_SNAKE_CASE = pq.ParquetFile(tmp_path / """foo.parquet""" )
_SCREAMING_SNAKE_CASE = pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = str(shared_datadir / """test_image_rgb.jpg""" )
_SCREAMING_SNAKE_CASE = {"""image""": [image_path]}
_SCREAMING_SNAKE_CASE = Features({"""image""": Image()} )
_SCREAMING_SNAKE_CASE = Dataset.from_dict(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_SCREAMING_SNAKE_CASE = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=SCREAMING_SNAKE_CASE_ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
assert get_writer_batch_size(SCREAMING_SNAKE_CASE_ ) == expected
| 0 | 1 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
UpperCamelCase__ : Union[str, Any] = get_logger(__name__)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ) -> int:
"""simple docstring"""
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
_SCREAMING_SNAKE_CASE = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
_SCREAMING_SNAKE_CASE = F"{MODEL_NAME}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}.bin"
_SCREAMING_SNAKE_CASE = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if accelerator.process_index == 0:
logger.info(F"Saving model to {output_model_file}" )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"Model saved to {output_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
_SCREAMING_SNAKE_CASE = (
F"{MODEL_NAME}_rank{accelerator.process_index}.bin"
if model_index == 0
else F"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
)
_SCREAMING_SNAKE_CASE = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"Saving model to {output_model_file}" )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"Model saved to {output_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
_SCREAMING_SNAKE_CASE = os.path.join(SCREAMING_SNAKE_CASE_ , F"{MODEL_NAME}_{model_index}" )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
logger.info(F"Saving model to {ckpt_dir}" )
_SCREAMING_SNAKE_CASE = {"""model""": state_dict}
dist_cp.save_state_dict(
state_dict=SCREAMING_SNAKE_CASE_ , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , )
logger.info(F"Model saved to {ckpt_dir}" )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ) -> Union[str, Any]:
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(SCREAMING_SNAKE_CASE_ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"""Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
"""initializing FSDP object""" )
return
_SCREAMING_SNAKE_CASE = F"{MODEL_NAME}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}.bin"
_SCREAMING_SNAKE_CASE = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"Loading model from {input_model_file}" )
_SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ )
logger.info(F"Model loaded from {input_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
_SCREAMING_SNAKE_CASE = (
F"{MODEL_NAME}_rank{accelerator.process_index}.bin"
if model_index == 0
else F"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
)
_SCREAMING_SNAKE_CASE = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"Loading model from {input_model_file}" )
_SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ )
logger.info(F"Model loaded from {input_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
_SCREAMING_SNAKE_CASE = (
os.path.join(SCREAMING_SNAKE_CASE_ , F"{MODEL_NAME}_{model_index}" )
if F"{MODEL_NAME}" not in input_dir
else input_dir
)
logger.info(F"Loading model from {ckpt_dir}" )
_SCREAMING_SNAKE_CASE = {"""model""": model.state_dict()}
dist_cp.load_state_dict(
state_dict=SCREAMING_SNAKE_CASE_ , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , planner=DefaultLoadPlanner() , )
_SCREAMING_SNAKE_CASE = state_dict["""model"""]
logger.info(F"Model loaded from {ckpt_dir}" )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ) -> str:
"""simple docstring"""
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
_SCREAMING_SNAKE_CASE = FSDP.optim_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
_SCREAMING_SNAKE_CASE = (
F"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else F"{OPTIMIZER_NAME}_{optimizer_index}.bin"
)
_SCREAMING_SNAKE_CASE = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"Saving Optimizer state to {output_optimizer_file}" )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"Optimizer state saved in {output_optimizer_file}" )
else:
_SCREAMING_SNAKE_CASE = os.path.join(SCREAMING_SNAKE_CASE_ , F"{OPTIMIZER_NAME}_{optimizer_index}" )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
logger.info(F"Saving Optimizer state to {ckpt_dir}" )
dist_cp.save_state_dict(
state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , )
logger.info(F"Optimizer state saved in {ckpt_dir}" )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ) -> List[Any]:
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
_SCREAMING_SNAKE_CASE = None
# below check should work but currently it isn't working (mostly opytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
_SCREAMING_SNAKE_CASE = (
F"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else F"{OPTIMIZER_NAME}_{optimizer_index}.bin"
)
_SCREAMING_SNAKE_CASE = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"Loading Optimizer state from {input_optimizer_file}" )
_SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ )
logger.info(F"Optimizer state loaded from {input_optimizer_file}" )
else:
_SCREAMING_SNAKE_CASE = (
os.path.join(SCREAMING_SNAKE_CASE_ , F"{OPTIMIZER_NAME}_{optimizer_index}" )
if F"{OPTIMIZER_NAME}" not in input_dir
else input_dir
)
logger.info(F"Loading Optimizer from {ckpt_dir}" )
_SCREAMING_SNAKE_CASE = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , )
_SCREAMING_SNAKE_CASE = optim_state["""optimizer"""]
logger.info(F"Optimizer loaded from {ckpt_dir}" )
_SCREAMING_SNAKE_CASE = FSDP.optim_state_dict_to_load(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
optimizer.load_state_dict(SCREAMING_SNAKE_CASE_ )
| 0 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , int ):
raise ValueError("""multiplicative_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""multiplicative_persistence() does not accept negative values""" )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
while len(num_string ) != 1:
_SCREAMING_SNAKE_CASE = [int(i ) for i in num_string]
_SCREAMING_SNAKE_CASE = 1
for i in range(0 , len(numbers ) ):
total *= numbers[i]
_SCREAMING_SNAKE_CASE = str(total )
steps += 1
return steps
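# Example: 39 -> 3 * 9 = 27 -> 2 * 7 = 14 -> 1 * 4 = 4, so the multiplicative persistence of 39 is 3.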
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , int ):
raise ValueError("""additive_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""additive_persistence() does not accept negative values""" )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
while len(num_string ) != 1:
_SCREAMING_SNAKE_CASE = [int(i ) for i in num_string]
_SCREAMING_SNAKE_CASE = 0
for i in range(0 , len(numbers ) ):
total += numbers[i]
_SCREAMING_SNAKE_CASE = str(total )
steps += 1
return steps
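# Example: 199 -> 1 + 9 + 9 = 19 -> 1 + 9 = 10 -> 1 + 0 = 1, so the additive persistence of 199 is 3.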
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 1 |
'''simple docstring'''
import math
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
return y * math.logaa(SCREAMING_SNAKE_CASE_ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("""This should never happen""" )
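# Example: comparing 2^10 with 3^7 via logs: 10 * log10(2) ~= 3.01 and 7 * log10(3) ~= 3.34,
# so 3^7 (= 2187) is the larger number even though its base is raised to a smaller power.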
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
UpperCamelCase__ : Dict = "Enter the base and the power separated by a comma: "
UpperCamelCase__ , UpperCamelCase__ : Dict = map(int, input(prompt).split(","))
UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = map(int, input(prompt).split(","))
# We find the log of each number, using the function res(), which takes two
# arguments.
UpperCamelCase__ : int = res(xa, ya)
UpperCamelCase__ : Optional[Any] = res(xb, yb)
# We check for the largest number
if resa > resb:
print("Largest number is", xa, "^", ya)
elif resb > resa:
print("Largest number is", xb, "^", yb)
else:
print("Both are equal")
| 0 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
UpperCamelCase__ : Tuple = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase__ : Union[str, Any] = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase__ : str = "sshleifer/tiny-mbart"
@require_torch
class _a (_lowerCamelCase):
"""simple docstring"""
def UpperCamelCase ( self , A__=False , A__=None , A__=True , A__=True , A__=True , A__=True , ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=A__ , num_train_epochs=1 , distributed=A__ , extra_args_str=A__ , predict_with_generate=A__ , do_train=A__ , do_eval=A__ , do_predict=A__ , )
_SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history
if not do_eval:
return
_SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()]
_SCREAMING_SNAKE_CASE = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
_SCREAMING_SNAKE_CASE = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , A__ )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def UpperCamelCase ( self ) -> Optional[int]:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def UpperCamelCase ( self ) -> Optional[Any]:
self.run_seqaseq_quick(distributed=A__ )
@require_torch_multi_gpu
def UpperCamelCase ( self ) -> Union[str, Any]:
self.run_seqaseq_quick(distributed=A__ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> Any:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> Tuple:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> str:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=A__ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> List[str]:
self.run_seqaseq_quick(
distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=A__ )
@require_apex
@require_torch_gpu
def UpperCamelCase ( self ) -> Optional[Any]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def UpperCamelCase ( self , A__ ) -> List[Any]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
_SCREAMING_SNAKE_CASE = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
_SCREAMING_SNAKE_CASE = experiments[experiment_id]
_SCREAMING_SNAKE_CASE = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
_SCREAMING_SNAKE_CASE = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**A__ , extra_args_str=data["""extra_args_str"""] )
_SCREAMING_SNAKE_CASE = len(re.findall(A__ , cl.err ) )
self.assertEqual(A__ , data["""n_matches"""] )
@slow
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=10 , distributed=A__ , )
# Check metrics
_SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history
_SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()]
_SCREAMING_SNAKE_CASE = eval_metrics[0]
_SCREAMING_SNAKE_CASE = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , A__ )
# test if do_predict saves generations and metrics
_SCREAMING_SNAKE_CASE = os.listdir(A__ )
_SCREAMING_SNAKE_CASE = {os.path.basename(A__ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def UpperCamelCase ( self ) -> Dict:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(A__ ) -> Tuple[int, float]:
_SCREAMING_SNAKE_CASE = """--skip_memory_metrics 0"""
_SCREAMING_SNAKE_CASE = self.run_trainer(
max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=1 , optim=A__ , distributed=A__ , extra_args_str=A__ , do_eval=A__ , do_predict=A__ , n_gpus_to_use=1 , )
# Check metrics
_SCREAMING_SNAKE_CASE = TrainerState.load_from_json(Path(A__ , """trainer_state.json""" ) ).log_history
_SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
_SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
_SCREAMING_SNAKE_CASE = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
_SCREAMING_SNAKE_CASE = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
_SCREAMING_SNAKE_CASE = gpu_peak_mem_orig + gpu_alloc_mem_orig
_SCREAMING_SNAKE_CASE = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
_SCREAMING_SNAKE_CASE = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
_SCREAMING_SNAKE_CASE = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
A__ , A__ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
A__ , A__ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
A__ , A__ , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ = 3E-3 , A__ = "adafactor" , A__ = False , A__ = None , A__ = 0 , A__ = True , A__ = True , A__ = True , A__ = True , A__ = None , ) -> Dict:
_SCREAMING_SNAKE_CASE = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
_SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE = F"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A__ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(A__ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
_SCREAMING_SNAKE_CASE = F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A__ )}\n ".split()
_SCREAMING_SNAKE_CASE = """
--do_predict
""".split()
_SCREAMING_SNAKE_CASE = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
_SCREAMING_SNAKE_CASE = get_gpu_count()
_SCREAMING_SNAKE_CASE = get_torch_dist_unique_port()
_SCREAMING_SNAKE_CASE = F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
_SCREAMING_SNAKE_CASE = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(A__ , env=self.get_env() )
else:
_SCREAMING_SNAKE_CASE = ["""run_translation.py"""] + args
with patch.object(A__ , """argv""" , A__ ):
main()
return output_dir
| 0 | 1 |
'''simple docstring'''
from math import pi
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> float:
"""simple docstring"""
return 2 * pi * radius * (angle / 3_60)
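# Example: a 90 degree arc of a circle with radius 10 has length 2 * pi * 10 * (90 / 3_60) ~= 15.71.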
if __name__ == "__main__":
print(arc_length(90, 10))
| 0 |
'''simple docstring'''
import sys
UpperCamelCase__ : int = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = N ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = -sys.maxsize - 1
for i in range(len(SCREAMING_SNAKE_CASE_ ) - 12 ):
_SCREAMING_SNAKE_CASE = 1
for j in range(13 ):
product *= int(SCREAMING_SNAKE_CASE_[i + j] )
if product > largest_product:
_SCREAMING_SNAKE_CASE = product
return largest_product
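# Note: each 13-digit window product is recomputed from scratch; a rolling product is awkward here
# because of zero digits, so this straightforward O(13 * n) scan is the usual approach.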
if __name__ == "__main__":
print(f"""{solution() = }""")
| 0 | 1 |
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None ) -> None:
"""simple docstring"""
if start is None:
_SCREAMING_SNAKE_CASE = 0
if end is None:
_SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ ) - 1
if start >= end:
return
_SCREAMING_SNAKE_CASE = (start + end) // 2
slowsort(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
slowsort(SCREAMING_SNAKE_CASE_ , mid + 1 , SCREAMING_SNAKE_CASE_ )
if sequence[end] < sequence[mid]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = sequence[mid], sequence[end]
slowsort(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , end - 1 )
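# Example (in-place sort):
# data = [5, 3, 1, 4]
# slowsort(data)
# data is now [1, 3, 4, 5]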
if __name__ == "__main__":
from doctest import testmod
testmod()
| 0 |
'''simple docstring'''
UpperCamelCase__ : Dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
UpperCamelCase__ : str = {value: key for key, value in encode_dict.items()}
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """"""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("""encode() accepts only letters of the alphabet and spaces""" )
return encoded
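# Example: encoding "ab" yields "AAAAAAAAAB", since every letter maps to a fixed five-symbol A/B code.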
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
if set(SCREAMING_SNAKE_CASE_ ) - {"A", "B", " "} != set():
raise Exception("""decode() accepts only 'A', 'B' and spaces""" )
_SCREAMING_SNAKE_CASE = """"""
for word in coded.split():
while len(word ) != 0:
decoded += decode_dict[word[:5]]
_SCREAMING_SNAKE_CASE = word[5:]
decoded += " "
return decoded.strip()
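# Example: decoding "AAAAAAAAAB" yields "ab"; each word is consumed five symbols at a time.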
if __name__ == "__main__":
from doctest import testmod
testmod()
| 0 | 1 |
'''simple docstring'''
import requests
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {"""Content-Type""": """application/json"""}
_SCREAMING_SNAKE_CASE = requests.post(SCREAMING_SNAKE_CASE_ , json={"""text""": message_body} , headers=SCREAMING_SNAKE_CASE_ )
if response.status_code != 2_00:
_SCREAMING_SNAKE_CASE = (
"""Request to slack returned an error """
F"{response.status_code}, the response is:\n{response.text}"
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = emb.weight.shape
_SCREAMING_SNAKE_CASE = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = emb.weight.data
return lin_layer
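# Note: the linear layer above shares its weight tensor with the embedding, i.e. the usual
# weight-tying trick used for the output projection of seq2seq models.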
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )
_SCREAMING_SNAKE_CASE = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
_SCREAMING_SNAKE_CASE = mam_aaa["""model"""]
remove_ignore_keys_(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = state_dict["""encoder.embed_tokens.weight"""].shape[0]
_SCREAMING_SNAKE_CASE = MaMaaaConfig(
vocab_size=SCREAMING_SNAKE_CASE_ , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
_SCREAMING_SNAKE_CASE = state_dict["""decoder.embed_tokens.weight"""]
_SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
model.model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
UpperCamelCase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
UpperCamelCase__ : List[str] = parser.parse_args()
UpperCamelCase__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 0 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class _a (_lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = CanineTokenizer
SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self ) -> str:
super().setUp()
_SCREAMING_SNAKE_CASE = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase ( self ) -> str:
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def UpperCamelCase ( self , **A__ ) -> CanineTokenizer:
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(self.tmpdirname , **A__ )
_SCREAMING_SNAKE_CASE = 10_24
return tokenizer
@require_torch
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.canine_tokenizer
_SCREAMING_SNAKE_CASE = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
_SCREAMING_SNAKE_CASE = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
_SCREAMING_SNAKE_CASE = tokenizer(A__ , padding=A__ , return_tensors="""pt""" )
self.assertIsInstance(A__ , A__ )
_SCREAMING_SNAKE_CASE = list(batch.input_ids.numpy()[0] )
self.assertListEqual(A__ , A__ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.canine_tokenizer
_SCREAMING_SNAKE_CASE = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
_SCREAMING_SNAKE_CASE = tokenizer(A__ , padding=A__ , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , A__ )
self.assertIn("""attention_mask""" , A__ )
self.assertIn("""token_type_ids""" , A__ )
@require_torch
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.canine_tokenizer
_SCREAMING_SNAKE_CASE = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
_SCREAMING_SNAKE_CASE = tokenizer(
text_target=A__ , max_length=32 , padding="""max_length""" , truncation=A__ , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def UpperCamelCase ( self ) -> Optional[Any]:
# safety check on max_len default value so we are sure the test works
_SCREAMING_SNAKE_CASE = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_SCREAMING_SNAKE_CASE = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE = """ He is very happy, UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ )
tokenizer.save_pretrained(A__ )
_SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(A__ )
_SCREAMING_SNAKE_CASE = after_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
shutil.rmtree(A__ )
_SCREAMING_SNAKE_CASE = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE = """ He is very happy, UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
_SCREAMING_SNAKE_CASE = chr(0Xe_0_0_7 )
additional_special_tokens.append(A__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ )
tokenizer.save_pretrained(A__ )
_SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(A__ )
_SCREAMING_SNAKE_CASE = after_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
self.assertIn(A__ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(A__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(A__ )
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=A__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.get_clean_sequence(A__ )
# a special token for Canine can be defined as follows:
_SCREAMING_SNAKE_CASE = 0Xe_0_0_5
_SCREAMING_SNAKE_CASE = chr(A__ )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertEqual(len(A__ ) , 1 )
_SCREAMING_SNAKE_CASE = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=A__ )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertEqual(A__ , input_encoded + special_token_id )
_SCREAMING_SNAKE_CASE = tokenizer.decode(A__ , skip_special_tokens=A__ )
self.assertTrue(special_token not in decoded )
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=A__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
_SCREAMING_SNAKE_CASE = chr(0Xe_0_0_5 )
_SCREAMING_SNAKE_CASE = chr(0Xe_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=A__ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ )
self.assertEqual(len(A__ ) , 1 )
self.assertEqual(len(A__ ) , 1 )
self.assertEqual(token_a[0] , A__ )
self.assertEqual(token_a[0] , A__ )
@require_tokenizers
def UpperCamelCase ( self ) -> int:
_SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=A__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# a special token for Canine can be defined as follows:
_SCREAMING_SNAKE_CASE = 0Xe_0_0_6
_SCREAMING_SNAKE_CASE = chr(A__ )
_SCREAMING_SNAKE_CASE = AddedToken(A__ , lstrip=A__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(A__ )
tokenizer.from_pretrained(A__ )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(A__ )
with open(os.path.join(A__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
_SCREAMING_SNAKE_CASE = json.load(A__ )
with open(os.path.join(A__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
_SCREAMING_SNAKE_CASE = json.load(A__ )
# a special token for Canine can be defined as follows:
_SCREAMING_SNAKE_CASE = 0Xe_0_0_6
_SCREAMING_SNAKE_CASE = chr(A__ )
_SCREAMING_SNAKE_CASE = [new_token_a]
_SCREAMING_SNAKE_CASE = [new_token_a]
with open(os.path.join(A__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(A__ , A__ )
with open(os.path.join(A__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(A__ , A__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained(A__ , extra_ids=0 )
self.assertIn(A__ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
_SCREAMING_SNAKE_CASE = 0Xe_0_0_7
_SCREAMING_SNAKE_CASE = chr(A__ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_SCREAMING_SNAKE_CASE = [AddedToken(A__ , lstrip=A__ )]
_SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained(
A__ , additional_special_tokens=A__ , extra_ids=0 )
self.assertIn(A__ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=A__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
_SCREAMING_SNAKE_CASE = """hello world"""
if self.space_between_special_tokens:
_SCREAMING_SNAKE_CASE = """[CLS] hello world [SEP]"""
else:
_SCREAMING_SNAKE_CASE = input
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer.decode(A__ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(A__ , [output, output.lower()] )
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
_SCREAMING_SNAKE_CASE = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_SCREAMING_SNAKE_CASE = """a"""
_SCREAMING_SNAKE_CASE = ord(A__ )
for attr in attributes_list:
setattr(A__ , attr + """_id""" , A__ )
self.assertEqual(getattr(A__ , A__ ) , A__ )
self.assertEqual(getattr(A__ , attr + """_id""" ) , A__ )
setattr(A__ , attr + """_id""" , A__ )
self.assertEqual(getattr(A__ , A__ ) , A__ )
self.assertEqual(getattr(A__ , attr + """_id""" ) , A__ )
setattr(A__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(A__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(A__ , """additional_special_tokens_ids""" ) , [] )
_SCREAMING_SNAKE_CASE = 0Xe_0_0_6
_SCREAMING_SNAKE_CASE = chr(A__ )
setattr(A__ , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(A__ , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(A__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def UpperCamelCase ( self ) -> Union[str, Any]:
pass
def UpperCamelCase ( self ) -> List[str]:
pass
def UpperCamelCase ( self ) -> Optional[Any]:
pass
def UpperCamelCase ( self ) -> List[Any]:
pass
def UpperCamelCase ( self ) -> Optional[int]:
pass
def UpperCamelCase ( self ) -> List[Any]:
pass
def UpperCamelCase ( self ) -> int:
pass
def UpperCamelCase ( self ) -> Optional[int]:
pass
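# Hedged usage sketch (not part of the test suite above; assumes network
# access to the public "google/canine-s" checkpoint). CANINE has no fixed
# vocabulary: input ids are raw Unicode code points wrapped in the special
# [CLS]/[SEP] code points 0xE000 (57344) and 0xE001 (57345), exactly as the
# expected ids in the batch test above show.
if __name__ == "__main__":
    from transformers import CanineTokenizer

    tok = CanineTokenizer.from_pretrained("google/canine-s")
    ids = tok("hi")["input_ids"]
    print(ids)  # [57344, 104, 105, 57345] -> [CLS] 'h' 'i' [SEP]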
| 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase__ : str = {
"configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
"tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = [
"CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
"CanineForMultipleChoice",
"CanineForQuestionAnswering",
"CanineForSequenceClassification",
"CanineForTokenClassification",
"CanineLayer",
"CanineModel",
"CaninePreTrainedModel",
"load_tf_weights_in_canine",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
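# Hedged aside (illustrative comment, not part of the original __init__):
# _LazyModule defers the heavy torch-dependent imports until an attribute is
# first accessed. A minimal stdlib sketch of the same idea:
#
#     import importlib
#
#     def lazy_get(module_name, attr):
#         return getattr(importlib.import_module(module_name), attr)
#
# so `from transformers.models.canine import CanineTokenizer` only pays the
# cost of importing `tokenization_canine` when that name is first resolved.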
| 0 | 1 |
'''simple docstring'''
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class _a :
"""simple docstring"""
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=32 , A__=5 , A__=4 , A__=4 , A__="gelu" , A__=0.0 , A__=0.1 , A__=True , A__=5_12 , A__=16 , A__=2 , A__=0.02 , A__=3 , A__=4 , A__=None , ) -> Dict:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_multiple_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout
_SCREAMING_SNAKE_CASE = attention_dropout
_SCREAMING_SNAKE_CASE = weight_tying
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCamelCase ( self ) -> Any:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self ) -> int:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE = True
return config, input_ids, input_mask, token_labels
def UpperCamelCase ( self , A__ , A__ , A__ ) -> Dict:
_SCREAMING_SNAKE_CASE = GPTNeoXJapaneseModel(config=A__ )
model.to(A__ )
model.eval()
_SCREAMING_SNAKE_CASE = model(A__ , attention_mask=A__ )
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self , A__ , A__ , A__ ) -> str:
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = GPTNeoXJapaneseModel(A__ )
model.to(A__ )
model.eval()
_SCREAMING_SNAKE_CASE = model(A__ , attention_mask=A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ ) -> Any:
_SCREAMING_SNAKE_CASE = GPTNeoXJapaneseForCausalLM(config=A__ )
model.to(A__ )
model.eval()
_SCREAMING_SNAKE_CASE = model(A__ , attention_mask=A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = GPTNeoXJapaneseForCausalLM(config=A__ )
model.to(A__ )
model.eval()
# first forward pass
_SCREAMING_SNAKE_CASE = model(A__ , attention_mask=A__ , use_cache=A__ )
_SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
_SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
_SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
_SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 )
_SCREAMING_SNAKE_CASE = model(A__ , attention_mask=A__ , output_hidden_states=A__ )
_SCREAMING_SNAKE_CASE = output_from_no_past["""hidden_states"""][0]
_SCREAMING_SNAKE_CASE = model(
A__ , attention_mask=A__ , past_key_values=A__ , output_hidden_states=A__ , )["""hidden_states"""][0]
# select random slice
_SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
_SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A__ , A__ , atol=1E-3 ) )
def UpperCamelCase ( self ) -> int:
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _a (_lowerCamelCase , _lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = (
{'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = GPTNeoXJapaneseModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ , hidden_size=37 )
def UpperCamelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(A__ , A__ , A__ )
def UpperCamelCase ( self ) -> int:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(A__ , A__ , A__ )
def UpperCamelCase ( self ) -> List[Any]:
# This regression test was failing with PyTorch < 1.3
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder()
_SCREAMING_SNAKE_CASE = None
self.model_tester.create_and_check_model_as_decoder(A__ , A__ , A__ )
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(A__ , A__ , A__ )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*A__ )
@slow
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = """abeja/gpt-neox-japanese-2.7b"""
_SCREAMING_SNAKE_CASE = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""]
_SCREAMING_SNAKE_CASE = [
"""データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""",
"""100年後に必要とされる会社は、「人」が中心の会社です。""",
"""フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""",
"""国境の長いトンネルを抜けると、そこは雪国だった。""",
"""美味しい日本食といえば、やっぱりお寿司ですよね。""",
]
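# (Reference translations, kept as a comment because the strings above are
# the model's actual Japanese inputs and targets: the prompts mean roughly
# "A data scientist is...", "A company that will be needed 100 years from
# now is...", "What you need in order to work fully remotely is...",
# "When the train came out of the long border tunnel..." and "Speaking of
# delicious Japanese food...", and each expected output completes its
# prompt accordingly.)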
_SCREAMING_SNAKE_CASE = GPTNeoXJapaneseTokenizer.from_pretrained(A__ )
_SCREAMING_SNAKE_CASE = GPTNeoXJapaneseForCausalLM.from_pretrained(A__ )
_SCREAMING_SNAKE_CASE = []
for prompt in prompts:
_SCREAMING_SNAKE_CASE = tokenizer(A__ , return_tensors="""pt""" ).input_ids
_SCREAMING_SNAKE_CASE = model.generate(A__ , max_length=50 )
_SCREAMING_SNAKE_CASE = tokenizer.batch_decode(A__ , skip_special_tokens=A__ )
predicted_outputs += generated_string
self.assertListEqual(A__ , A__ )
| 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE = 'ChineseCLIPImageProcessor'
SCREAMING_SNAKE_CASE = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self , A__=None , A__=None , **A__ ) -> int:
_SCREAMING_SNAKE_CASE = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , A__ , )
_SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""" )
_SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(A__ , A__ )
_SCREAMING_SNAKE_CASE = self.image_processor
def __call__( self , A__=None , A__=None , A__=None , **A__ ) -> Optional[int]:
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_SCREAMING_SNAKE_CASE = self.tokenizer(A__ , return_tensors=A__ , **A__ )
if images is not None:
_SCREAMING_SNAKE_CASE = self.image_processor(A__ , return_tensors=A__ , **A__ )
if text is not None and images is not None:
_SCREAMING_SNAKE_CASE = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A__ ) , tensor_type=A__ )
def UpperCamelCase ( self , *A__ , **A__ ) -> Dict:
return self.tokenizer.batch_decode(*A__ , **A__ )
def UpperCamelCase ( self , *A__ , **A__ ) -> Optional[Any]:
return self.tokenizer.decode(*A__ , **A__ )
@property
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase ( self ) -> Optional[int]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , A__ , )
return self.image_processor_class
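# Minimal usage sketch (illustrative, not part of the processor module;
# assumes the public "OFA-Sys/chinese-clip-vit-base-patch16" checkpoint and
# an installed Pillow):
if __name__ == "__main__":
    from PIL import Image
    from transformers import ChineseCLIPProcessor

    processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    image = Image.new("RGB", (224, 224))
    inputs = processor(text=["一只猫"], images=image, return_tensors="pt")
    # the encoding carries both tokenizer outputs and `pixel_values`
    print(sorted(inputs.keys()))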
| 0 | 1 |
'''simple docstring'''
import numpy as np
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> np.array:
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> np.array:
"""simple docstring"""
return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
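# Usage sketch (illustrative; assumes the two helpers above are exposed under
# their conventional names `sigmoid` and `gaussian_error_linear_unit`). The
# second function is the sigmoid-based GELU approximation
# x * sigmoid(1.702 * x), which closely tracks the exact x * Phi(x) for
# moderate |x|:
#
#     v = np.array([-2.0, 0.0, 2.0])
#     sigmoid(v)                       # ~[0.119, 0.5, 0.881]
#     gaussian_error_linear_unit(v)    # ~[-0.064, 0.0, 1.936]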
| 0 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCamelCase__ : List[str] = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
UpperCamelCase__ : List[Any] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
UpperCamelCase__ : Any = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _a (datasets.Metric):
"""simple docstring"""
def UpperCamelCase ( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def UpperCamelCase ( self , A__ , A__ , A__=None ) -> List[str]:
return {
"matthews_correlation": float(matthews_corrcoef(A__ , A__ , sample_weight=A__ ) ),
}
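# Usage sketch (illustrative, mirroring the docstring examples above; assumes
# the `datasets` library can load this metric script by name):
if __name__ == "__main__":
    metric = datasets.load_metric("matthews_correlation")
    result = metric.compute(references=[1, 3, 2, 0, 3, 2], predictions=[1, 2, 2, 0, 3, 3])
    print(round(result["matthews_correlation"], 2))  # 0.54, as in the docstring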
| 0 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase__ : str = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Dict = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longt5 import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongT5EncoderModel,
LongT5ForConditionalGeneration,
LongT5Model,
LongT5PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longt5 import (
FlaxLongT5ForConditionalGeneration,
FlaxLongT5Model,
FlaxLongT5PreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 |
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
"""simple docstring"""
print(F"Vertex\tShortest Distance from vertex {src}" )
for i, d in enumerate(SCREAMING_SNAKE_CASE_ ):
print(F"{i}\t\t{d}" )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
for j in range(SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
return True
return False
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> list[float]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [float("""inf""" )] * vertex_count
_SCREAMING_SNAKE_CASE = 0.0
for _ in range(vertex_count - 1 ):
for j in range(SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
_SCREAMING_SNAKE_CASE = distance[u] + w
_SCREAMING_SNAKE_CASE = check_negative_cycle(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if negative_cycle_exists:
raise Exception("""Negative cycle found""" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ : int = int(input("Enter number of vertices: ").strip())
UpperCamelCase__ : int = int(input("Enter number of edges: ").strip())
UpperCamelCase__ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("Edge ", i + 1)
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Dict = (
int(x)
for x in input("Enter source, destination, weight: ").strip().split(" ")
)
UpperCamelCase__ : Optional[Any] = {"src": src, "dst": dest, "weight": weight}
UpperCamelCase__ : Optional[Any] = int(input("\nEnter shortest path source:").strip())
UpperCamelCase__ : Any = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, source)
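# Non-interactive worked example (illustrative alternative to the prompts
# above): a 3-vertex graph with edges 0->1 (weight 4), 0->2 (weight 5) and
# 1->2 (weight -3). Shortest distances from vertex 0 are [0.0, 4.0, 1.0],
# since going 0 -> 1 -> 2 costs 4 - 3 = 1 < 5.
#
#     edges = [
#         {"src": 0, "dst": 1, "weight": 4},
#         {"src": 0, "dst": 2, "weight": 5},
#         {"src": 1, "dst": 2, "weight": -3},
#     ]
#     bellman_ford(edges, 3, 3, 0)  # -> [0.0, 4.0, 1.0]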
| 0 | 1 |
'''simple docstring'''
import os
def lowerCAmelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
with open(os.path.dirname(SCREAMING_SNAKE_CASE_ ) + """/grid.txt""" ) as f:
_SCREAMING_SNAKE_CASE = [] # noqa: E741
for _ in range(20 ):
l.append([int(SCREAMING_SNAKE_CASE_ ) for x in f.readline().split()] )
_SCREAMING_SNAKE_CASE = 0
# right
for i in range(20 ):
for j in range(17 ):
_SCREAMING_SNAKE_CASE = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
_SCREAMING_SNAKE_CASE = temp
# down
for i in range(17 ):
for j in range(20 ):
_SCREAMING_SNAKE_CASE = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
_SCREAMING_SNAKE_CASE = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
_SCREAMING_SNAKE_CASE = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
_SCREAMING_SNAKE_CASE = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
_SCREAMING_SNAKE_CASE = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
_SCREAMING_SNAKE_CASE = temp
return maximum
if __name__ == "__main__":
print(solution())
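# Illustrative note: the same four-directional scan on a tiny grid. For
#
#     1  2  3  4
#     5  6  7  8
#     9 10 11 12
#    13 14 15 16
#
# the best horizontal run is 13*14*15*16 = 43680, the best vertical run is
# 4*8*12*16 = 6144, and the main diagonal gives 1*6*11*16 = 1056, so the
# maximum product of four adjacent numbers is 43680.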
| 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class _a :
"""simple docstring"""
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=5_12 , A__=16 , A__=2 , A__=0.02 , A__=3 , A__=4 , A__=None , ) -> int:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = 13
_SCREAMING_SNAKE_CASE = 7
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = 99
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = 37
_SCREAMING_SNAKE_CASE = """gelu"""
_SCREAMING_SNAKE_CASE = 0.1
_SCREAMING_SNAKE_CASE = 0.1
_SCREAMING_SNAKE_CASE = 5_12
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 0.02
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = TFRoFormerModel(config=A__ )
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE = model(A__ )
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> str:
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = TFRoFormerForCausalLM(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )["""logits"""]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Dict:
_SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFRoFormerForSequenceClassification(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Any:
_SCREAMING_SNAKE_CASE = self.num_choices
_SCREAMING_SNAKE_CASE = TFRoFormerForMultipleChoice(config=A__ )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFRoFormerForTokenClassification(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Tuple:
_SCREAMING_SNAKE_CASE = TFRoFormerForQuestionAnswering(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class _a (_lowerCamelCase , _lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ ) -> str:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = TFRoFormerModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ , hidden_size=37 )
def UpperCamelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A__ )
def UpperCamelCase ( self ) -> int:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A__ )
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A__ )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A__ )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A__ )
@slow
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" )
self.assertIsNotNone(A__ )
@require_tf
class _a (unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE = model(A__ )[0]
# TODO Replace vocab size
_SCREAMING_SNAKE_CASE = 5_00_00
_SCREAMING_SNAKE_CASE = [1, 6, vocab_size]
self.assertEqual(output.shape , A__ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
_SCREAMING_SNAKE_CASE = tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A__ , atol=1E-4 )
@require_tf
class _a (unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 1E-4
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = tf.constant([[4, 10]] )
_SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
_SCREAMING_SNAKE_CASE = emba(input_ids.shape )
_SCREAMING_SNAKE_CASE = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(A__ , A__ , atol=self.tolerance )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
_SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 )
emba([2, 16, 5_12] )
_SCREAMING_SNAKE_CASE = emba.weight[:3, :5]
tf.debugging.assert_near(A__ , A__ , atol=self.tolerance )
@require_tf
class _a (unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 1E-4
def UpperCamelCase ( self ) -> int:
# 2,12,16,64
_SCREAMING_SNAKE_CASE = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
_SCREAMING_SNAKE_CASE = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
_SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
_SCREAMING_SNAKE_CASE = embed_positions([2, 16, 7_68] )[None, None, :, :]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A__ , A__ , A__ )
_SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
_SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A__ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A__ , atol=self.tolerance )
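# Hedged reference sketch (NumPy, illustrative only; not the exact HF
# implementation): the rotary scheme checked in the last test rotates each
# (even, odd) feature pair of the query/key by a position-dependent angle,
#
#     x_rot = x * cos + rotate_half(x) * sin
#
# where rotate_half maps interleaved pairs (x1, x2) to (-x2, x1) and the
# sin/cos tables come from the sinusoidal embedding (head dim must be even).
import numpy as np


def _rotate_half_sketch(x):
    x1, x2 = x[..., 0::2], x[..., 1::2]
    return np.stack([-x2, x1], axis=-1).reshape(x.shape)


def _apply_rotary_sketch(x, sin, cos):
    # sin/cos broadcast over (..., seq_len, head_dim)
    return x * cos + _rotate_half_sketch(x) * sin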
| 0 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ : Any = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Dict = [
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = [
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
UpperCamelCase__ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
UpperCamelCase__ : int = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 | 1 |
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ )
# If row is equal to the size of the board it means there is a queen in each
# row of the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
# We iterate over each column in the row to find all possible placements in that row
for col in range(SCREAMING_SNAKE_CASE_ ):
# We apply what we learned previously. First we check that the current board
# (possible_board) does not already contain this column value, because a
# repeated value would mean a vertical collision. Then we apply the two
# formulas we learned before:
#
# 45º: y - x = b or row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any of these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If there is no collision we call the dfs function again with updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = []
depth_first_search([] , [] , [] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Print all the boards
for board in boards:
for column in board:
print(SCREAMING_SNAKE_CASE_ )
print("""""" )
print(len(SCREAMING_SNAKE_CASE_ ) , """solutions were found.""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
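# For reference, n_queens_solution(4) prints the two known 4x4 solutions,
# one of which (column list [1, 3, 0, 2]) renders as:
#
#     . Q . .
#     . . . Q
#     Q . . .
#     . . Q .
#
# followed by "2 solutions were found."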
| 0 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = XCLIPTextConfig()
# derive patch size from model name
_SCREAMING_SNAKE_CASE = model_name.find("""patch""" )
_SCREAMING_SNAKE_CASE = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
_SCREAMING_SNAKE_CASE = XCLIPVisionConfig(patch_size=SCREAMING_SNAKE_CASE_ , num_frames=SCREAMING_SNAKE_CASE_ )
if "large" in model_name:
_SCREAMING_SNAKE_CASE = 7_68
_SCREAMING_SNAKE_CASE = 30_72
_SCREAMING_SNAKE_CASE = 12
_SCREAMING_SNAKE_CASE = 10_24
_SCREAMING_SNAKE_CASE = 40_96
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 24
_SCREAMING_SNAKE_CASE = 7_68
_SCREAMING_SNAKE_CASE = 30_72
if model_name == "xclip-large-patch14-16-frames":
_SCREAMING_SNAKE_CASE = 3_36
_SCREAMING_SNAKE_CASE = XCLIPConfig.from_text_vision_configs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if "large" in model_name:
_SCREAMING_SNAKE_CASE = 7_68
return config
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
# text encoder
if name == "token_embedding.weight":
_SCREAMING_SNAKE_CASE = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
_SCREAMING_SNAKE_CASE = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
_SCREAMING_SNAKE_CASE = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
_SCREAMING_SNAKE_CASE = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
_SCREAMING_SNAKE_CASE = name.replace("""c_proj""" , """fc2""" )
if name.startswith("""transformer.resblocks""" ):
_SCREAMING_SNAKE_CASE = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
_SCREAMING_SNAKE_CASE = name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
if "ln_final" in name:
_SCREAMING_SNAKE_CASE = name.replace("""ln_final""" , """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
_SCREAMING_SNAKE_CASE = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
if "visual.conv1" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
if "visual.proj" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.proj""" , """visual_projection.weight""" )
if "text_projection" in name:
_SCREAMING_SNAKE_CASE = name.replace("""text_projection""" , """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
_SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
_SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""positional""" , """position""" )
if name.startswith("""mit.resblocks""" ):
_SCREAMING_SNAKE_CASE = name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
_SCREAMING_SNAKE_CASE = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
return name
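# Illustrative trace (comment only): rename_key maps original X-CLIP
# parameter names onto the HF layout, e.g.
#     "transformer.resblocks.0.ln_1.weight"
#     -> "text_model.encoder.layers.0.layer_norm1.weight"
# (the "ln_1" substitution fires first, then the resblocks prefix is mapped).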
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_SCREAMING_SNAKE_CASE = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "attn.in_proj" in key:
_SCREAMING_SNAKE_CASE = key.split(""".""" )
if key.startswith("""visual""" ):
_SCREAMING_SNAKE_CASE = key_split[3]
_SCREAMING_SNAKE_CASE = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[
:dim, :
]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2, :
]
_SCREAMING_SNAKE_CASE = val[
-dim:, :
]
else:
_SCREAMING_SNAKE_CASE = val[
:dim
]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2
]
_SCREAMING_SNAKE_CASE = val[
-dim:
]
else:
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[
:dim, :
]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2, :
]
_SCREAMING_SNAKE_CASE = val[
-dim:, :
]
else:
_SCREAMING_SNAKE_CASE = val[:dim]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2
]
_SCREAMING_SNAKE_CASE = val[-dim:]
elif key.startswith("""mit""" ):
_SCREAMING_SNAKE_CASE = key_split[2]
_SCREAMING_SNAKE_CASE = config.vision_config.mit_hidden_size
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[:dim, :]
_SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
_SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
_SCREAMING_SNAKE_CASE = val[:dim]
_SCREAMING_SNAKE_CASE = val[dim : dim * 2]
_SCREAMING_SNAKE_CASE = val[-dim:]
else:
_SCREAMING_SNAKE_CASE = key_split[2]
_SCREAMING_SNAKE_CASE = config.text_config.hidden_size
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[:dim, :]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2, :
]
_SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
_SCREAMING_SNAKE_CASE = val[:dim]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2
]
_SCREAMING_SNAKE_CASE = val[-dim:]
else:
_SCREAMING_SNAKE_CASE = rename_key(SCREAMING_SNAKE_CASE_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
_SCREAMING_SNAKE_CASE = val.T
_SCREAMING_SNAKE_CASE = val
return orig_state_dict
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
if num_frames == 8:
_SCREAMING_SNAKE_CASE = """eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
_SCREAMING_SNAKE_CASE = """eating_spaghetti.npy"""
elif num_frames == 32:
_SCREAMING_SNAKE_CASE = """eating_spaghetti_32_frames.npy"""
_SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" , )
_SCREAMING_SNAKE_CASE = np.load(SCREAMING_SNAKE_CASE_ )
return list(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
_SCREAMING_SNAKE_CASE = model_to_url[model_name]
_SCREAMING_SNAKE_CASE = 8
if "16-frames" in model_name:
_SCREAMING_SNAKE_CASE = 16
elif "shot" in model_name:
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = get_xclip_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ )
model.eval()
if "drive" in checkpoint_url:
_SCREAMING_SNAKE_CASE = """pytorch_model.bin"""
gdown.cached_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""model"""]
else:
_SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ )["""model"""]
_SCREAMING_SNAKE_CASE = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
_SCREAMING_SNAKE_CASE = 3_36 if model_name == """xclip-large-patch14-16-frames""" else 2_24
_SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(size=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
_SCREAMING_SNAKE_CASE = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
_SCREAMING_SNAKE_CASE = XCLIPProcessor(image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = prepare_video(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE_ )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
# Verify outputs
_SCREAMING_SNAKE_CASE = outputs.logits_per_video
_SCREAMING_SNAKE_CASE = logits_per_video.softmax(dim=1 )
print("""Probs:""" , SCREAMING_SNAKE_CASE_ )
# kinetics-400
if model_name == "xclip-base-patch32":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F"Model name {model_name} not supported" )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
slow_tokenizer.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase__ : str = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
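# Example invocation (a sketch: the script filename and output path are assumptions,
# the flags are the ones defined by the parser above):
#   python convert_x_clip_original_pytorch_to_hf.py --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32 --push_to_hub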
| 0 | 1 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCamelCase__ : Tuple = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
inspect_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = path + """.py"""
assert script_name in os.listdir(SCREAMING_SNAKE_CASE_ )
assert "__pycache__" not in os.listdir(SCREAMING_SNAKE_CASE_ )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
inspect_metric(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = path + """.py"""
assert script_name in os.listdir(SCREAMING_SNAKE_CASE_ )
assert "__pycache__" not in os.listdir(SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = get_dataset_config_info(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
get_dataset_config_info(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = get_dataset_config_names(SCREAMING_SNAKE_CASE_ )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = get_dataset_infos(SCREAMING_SNAKE_CASE_ )
assert list(infos.keys() ) == expected_configs
_SCREAMING_SNAKE_CASE = expected_configs[0]
assert expected_config in infos
_SCREAMING_SNAKE_CASE = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = get_dataset_infos(SCREAMING_SNAKE_CASE_ )
assert expected_config in infos
_SCREAMING_SNAKE_CASE = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
"""simple docstring"""
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
get_dataset_split_names(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ )
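# These tests carry the pytest.mark.integration marker defined above (pytestmark in
# the upstream test file) and reach the Hugging Face Hub; a typical invocation
# (sketch, test filename assumed) is:
#   pytest -m integration tests/test_inspect.py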
| 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , A__ , A__ ) -> Any:
_SCREAMING_SNAKE_CASE = params
_SCREAMING_SNAKE_CASE = np.array(A__ )
_SCREAMING_SNAKE_CASE = np.array([len(A__ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , A__ ) -> Dict:
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> Tuple:
return len(self.lengths )
def UpperCamelCase ( self ) -> Dict:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.params.max_model_input_size
_SCREAMING_SNAKE_CASE = self.lengths > max_len
logger.info(F"Splitting {sum(A__ )} too long sequences." )
def divide_chunks(A__ , A__ ):
return [l[i : i + n] for i in range(0 , len(A__ ) , A__ )]
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
if self.params.mlm:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
_SCREAMING_SNAKE_CASE = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
_SCREAMING_SNAKE_CASE = np.insert(A__ , 0 , A__ )
if sub_s[-1] != sep_id:
_SCREAMING_SNAKE_CASE = np.insert(A__ , len(A__ ) , A__ )
assert len(A__ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(A__ )
new_tok_ids.extend(A__ )
new_lengths.extend([len(A__ ) for l in sub_seqs] )
_SCREAMING_SNAKE_CASE = np.array(A__ )
_SCREAMING_SNAKE_CASE = np.array(A__ )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = len(self )
_SCREAMING_SNAKE_CASE = self.lengths > 11
_SCREAMING_SNAKE_CASE = self.token_ids[indices]
_SCREAMING_SNAKE_CASE = self.lengths[indices]
_SCREAMING_SNAKE_CASE = len(self )
logger.info(F"Remove {init_size - new_size} too short (<=11 tokens) sequences." )
def UpperCamelCase ( self ) -> int:
if "unk_token" not in self.params.special_tok_ids:
return
else:
_SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""]
_SCREAMING_SNAKE_CASE = len(self )
_SCREAMING_SNAKE_CASE = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
_SCREAMING_SNAKE_CASE = (unk_occs / self.lengths) < 0.5
_SCREAMING_SNAKE_CASE = self.token_ids[indices]
_SCREAMING_SNAKE_CASE = self.lengths[indices]
_SCREAMING_SNAKE_CASE = len(self )
logger.info(F"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." )
def UpperCamelCase ( self ) -> Optional[Any]:
if not self.params.is_master:
return
logger.info(F"{len(self )} sequences" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def UpperCamelCase ( self , A__ ) -> Any:
_SCREAMING_SNAKE_CASE = [t[0] for t in batch]
_SCREAMING_SNAKE_CASE = [t[1] for t in batch]
assert len(A__ ) == len(A__ )
# Max for paddings
_SCREAMING_SNAKE_CASE = max(A__ )
# Pad token ids
if self.params.mlm:
_SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""pad_token"""]
else:
_SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""]
_SCREAMING_SNAKE_CASE = [list(t.astype(A__ ) ) + [pad_idx] * (max_seq_len_ - len(A__ )) for t in token_ids]
assert len(tk_ ) == len(A__ )
assert all(len(A__ ) == max_seq_len_ for t in tk_ )
_SCREAMING_SNAKE_CASE = torch.tensor(tk_ ) # (bs, max_seq_len_)
_SCREAMING_SNAKE_CASE = torch.tensor(A__ ) # (bs)
return tk_t, lg_t
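# Usage sketch: an instance pairs token-id arrays with their lengths, and the final
# method above (batch_sequences in the upstream distillation code) pads a batch of
# (token_ids, length) pairs into (bs, max_seq_len) tensors; it is intended to be
# passed to a torch DataLoader as collate_fn.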
| 0 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : int = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[int] = ["PoolFormerFeatureExtractor"]
UpperCamelCase__ : Dict = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure)
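# The _LazyModule indirection keeps `import transformers` cheap: the torch- and
# vision-dependent symbols listed in the import structure above are only imported
# the first time an attribute such as PoolFormerModel is actually accessed.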
| 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : Any = "▁"
UpperCamelCase__ : Any = {"vocab_file": "spiece.model"}
UpperCamelCase__ : int = {
"vocab_file": {
"google/reformer-crime-and-punishment": (
"https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
)
}
}
UpperCamelCase__ : Optional[int] = {
"google/reformer-crime-and-punishment": 524_288,
}
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
def __init__( self , A__ , A__="</s>" , A__="<unk>" , A__=[] , A__ = None , **A__ , ) -> None:
_SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=A__ , unk_token=A__ , additional_special_tokens=A__ , sp_model_kwargs=self.sp_model_kwargs , **A__ , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A__ )
@property
def UpperCamelCase ( self ) -> Any:
return self.sp_model.get_piece_size()
def UpperCamelCase ( self ) -> Dict[str, int]:
_SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(A__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> int:
_SCREAMING_SNAKE_CASE = self.__dict__.copy()
_SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self , A__ ) -> str:
_SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase ( self , A__ ) -> List[str]:
return self.sp_model.encode(A__ , out_type=A__ )
def UpperCamelCase ( self , A__ ) -> Union[str, Any]:
return self.sp_model.piece_to_id(A__ )
def UpperCamelCase ( self , A__ ) -> List[Any]:
if index < self.sp_model.get_piece_size():
_SCREAMING_SNAKE_CASE = self.sp_model.IdToPiece(A__ )
return token
def UpperCamelCase ( self , A__ ) -> str:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A__ ) + token
_SCREAMING_SNAKE_CASE = []
else:
current_sub_tokens.append(A__ )
out_string += self.sp_model.decode(A__ )
return out_string.strip()
def UpperCamelCase ( self , A__ , A__ = None ) -> Tuple[str]:
if not os.path.isdir(A__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_SCREAMING_SNAKE_CASE = os.path.join(
A__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A__ )
elif not os.path.isfile(self.vocab_file ):
with open(A__ , """wb""" ) as fi:
_SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(A__ )
return (out_vocab_file,)
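# Usage sketch (requires sentencepiece; the hub id is the real one referenced above,
# `_a` is this module's obfuscated class name):
#   tokenizer = _a.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tokenizer("Crime and Punishment")["input_ids"]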
| 0 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 0
# if input_string is "aba" than new_input_string become "a|b|a"
_SCREAMING_SNAKE_CASE = """"""
_SCREAMING_SNAKE_CASE = """"""
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(SCREAMING_SNAKE_CASE_ ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the start and end of the previous furthest-ending palindromic
# substring
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0, 0
# length[i] shows the length of palindromic substring with center i
_SCREAMING_SNAKE_CASE = [1 for i in range(len(SCREAMING_SNAKE_CASE_ ) )]
# for each character in new_input_string find the corresponding palindromic string
_SCREAMING_SNAKE_CASE = 0
for j in range(len(SCREAMING_SNAKE_CASE_ ) ):
_SCREAMING_SNAKE_CASE = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(SCREAMING_SNAKE_CASE_ )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_SCREAMING_SNAKE_CASE = 2 * k - 1
# does this palindrome end after the previously explored end (that is, r)?
# if yes, update r to the last index of this palindrome
if j + k - 1 > r:
_SCREAMING_SNAKE_CASE = j - k + 1 # noqa: E741
_SCREAMING_SNAKE_CASE = j + k - 1
# update max_length and start position
if max_length < length[j]:
_SCREAMING_SNAKE_CASE = length[j]
_SCREAMING_SNAKE_CASE = j
# create that string
_SCREAMING_SNAKE_CASE = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
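# Worked example: "aba" is augmented to "a|b|a"; centered on 'b', the expansion
# reaches k = 3, so length = 2 * 3 - 1 = 5 in the augmented string, and stripping
# the '|' separators recovers the palindrome "aba".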
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _a (_lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MobileBertTokenizer
SCREAMING_SNAKE_CASE = MobileBertTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = filter_non_english
SCREAMING_SNAKE_CASE = 'google/mobilebert-uncased'
def UpperCamelCase ( self ) -> Any:
super().setUp()
_SCREAMING_SNAKE_CASE = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def UpperCamelCase ( self , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE = """unwanted, running"""
return input_text, output_text
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(A__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [9, 6, 7, 12, 10, 11] )
def UpperCamelCase ( self ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
# With lower casing
_SCREAMING_SNAKE_CASE = self.get_tokenizer(do_lower_case=A__ )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer(do_lower_case=A__ )
_SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
_SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(A__ ):
_SCREAMING_SNAKE_CASE = i
_SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=A__ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def UpperCamelCase ( self ) -> str:
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def UpperCamelCase ( self ) -> Union[str, Any]:
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def UpperCamelCase ( self ) -> Dict:
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(A__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A__ )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A__ , A__ )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def UpperCamelCase ( self ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
_SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(
A__ , return_attention_mask=A__ , return_token_type_ids=A__ , return_offsets_mapping=A__ , add_special_tokens=A__ , )
_SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(A__ , """do_lower_case""" ) else False
_SCREAMING_SNAKE_CASE = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = ["""的""", """人""", """有"""]
_SCREAMING_SNAKE_CASE = """""".join(A__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(A__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A__ , A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(A__ )
# it is expected that only the first Chinese character is not preceded by "##".
_SCREAMING_SNAKE_CASE = [
F"##{token}" if idx != 0 else token for idx, token in enumerate(A__ )
]
self.assertListEqual(A__ , A__ )
self.assertListEqual(A__ , A__ )
| 0 | 1 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = emb.weight.shape
_SCREAMING_SNAKE_CASE = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = emb.weight.data
return lin_layer
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )
_SCREAMING_SNAKE_CASE = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
_SCREAMING_SNAKE_CASE = mam_aaa["""model"""]
remove_ignore_keys_(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = state_dict["""encoder.embed_tokens.weight"""].shape[0]
_SCREAMING_SNAKE_CASE = MaMaaaConfig(
vocab_size=SCREAMING_SNAKE_CASE_ , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
_SCREAMING_SNAKE_CASE = state_dict["""decoder.embed_tokens.weight"""]
_SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
model.model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
UpperCamelCase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
UpperCamelCase__ : List[str] = parser.parse_args()
UpperCamelCase__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
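# Example invocation (a sketch; the script filename and local paths are assumptions):
#   python convert_m2m100_original_checkpoint_to_pytorch.py m2m100.pt ./m2m100-hf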
| 0 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
UpperCamelCase__ : Tuple = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , *A__ , A__=None , A__=None , A__=None , **A__ ) -> Optional[int]:
super().__init__(*A__ , **A__ )
_SCREAMING_SNAKE_CASE = eval_examples
_SCREAMING_SNAKE_CASE = post_process_function
_SCREAMING_SNAKE_CASE = quant_trainer_args
_SCREAMING_SNAKE_CASE = 1_28 # default number of calibration samples
def UpperCamelCase ( self , A__=None ) -> Union[str, Any]:
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("""Trainer: calibration requires an calib_dataset.""" )
_SCREAMING_SNAKE_CASE = calib_dataset if calib_dataset is not None else self.calib_dataset
_SCREAMING_SNAKE_CASE = self._remove_unused_columns(A__ , description="""Calibration""" )
return DataLoader(
A__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=A__ , )
def UpperCamelCase ( self , A__=None ) -> str:
_SCREAMING_SNAKE_CASE = self.train_dataset if calib_dataset is None else calib_dataset
_SCREAMING_SNAKE_CASE = self.get_calib_dataloader(A__ )
_SCREAMING_SNAKE_CASE = self.model
quant_trainer.configure_model(A__ , self.quant_trainer_args , calib=A__ )
model.eval()
quant_trainer.enable_calibration(A__ )
logger.info("""***** Running calibration *****""" )
logger.info(F" Num examples = {self.calib_num}" )
logger.info(F" Batch size = {calib_dataloader.batch_size}" )
for step, inputs in enumerate(A__ ):
# Prediction step
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.prediction_step(A__ , A__ , prediction_loss_only=A__ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(A__ , self.quant_trainer_args )
_SCREAMING_SNAKE_CASE = model
def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__ = "eval" ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.eval_dataset if eval_dataset is None else eval_dataset
_SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ )
_SCREAMING_SNAKE_CASE = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_SCREAMING_SNAKE_CASE = self.compute_metrics
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_SCREAMING_SNAKE_CASE = eval_loop(
A__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , )
finally:
_SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions )
_SCREAMING_SNAKE_CASE = self.compute_metrics(A__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
_SCREAMING_SNAKE_CASE = metrics.pop(A__ )
self.log(A__ )
else:
_SCREAMING_SNAKE_CASE = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_SCREAMING_SNAKE_CASE = self.callback_handler.on_evaluate(self.args , self.state , self.control , A__ )
return metrics
def UpperCamelCase ( self , A__ , A__ , A__=None , A__ = "test" ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.get_test_dataloader(A__ )
# Temporarily disable metric computation, we will do it in the loop here.
_SCREAMING_SNAKE_CASE = self.compute_metrics
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_SCREAMING_SNAKE_CASE = eval_loop(
A__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , )
finally:
_SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions , """predict""" )
_SCREAMING_SNAKE_CASE = self.compute_metrics(A__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
_SCREAMING_SNAKE_CASE = metrics.pop(A__ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A__ )
def UpperCamelCase ( self , A__="./" ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.eval_dataset
_SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ )
_SCREAMING_SNAKE_CASE = next(iter(A__ ) )
# saving device - to make it consistent
_SCREAMING_SNAKE_CASE = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
# convert to tuple
_SCREAMING_SNAKE_CASE = tuple(v.to(A__ ) for k, v in batch.items() )
logger.info("""Converting model to be onnx compatible""" )
from pytorch_quantization.nn import TensorQuantizer
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = self.model.to(A__ )
model.eval()
model.float()
_SCREAMING_SNAKE_CASE = model.module if hasattr(A__ , """module""" ) else model
quant_trainer.configure_model(A__ , self.quant_trainer_args )
_SCREAMING_SNAKE_CASE = os.path.join(A__ , """model.onnx""" )
logger.info(F"exporting model to {output_model_file}" )
_SCREAMING_SNAKE_CASE = {0: """batch_size""", 1: """seq_len"""}
torch.onnx.export(
A__ , A__ , A__ , export_params=A__ , opset_version=13 , do_constant_folding=A__ , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=A__ , )
logger.info("""onnx export finished""" )
| 0 | 1 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
UpperCamelCase__ : Tuple = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
UpperCamelCase__ : Tuple = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
UpperCamelCase__ : Optional[int] = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _a (datasets.Metric):
"""simple docstring"""
def UpperCamelCase ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
def UpperCamelCase ( self , A__ , A__ , A__=None , A__=False , A__=False , A__=False , ) -> Optional[Any]:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
_SCREAMING_SNAKE_CASE = np.array([re.sub(A__ , """""" , A__ ) for x in predictions] )
_SCREAMING_SNAKE_CASE = np.array([re.sub(A__ , """""" , A__ ) for x in references] )
else:
_SCREAMING_SNAKE_CASE = np.asarray(A__ )
_SCREAMING_SNAKE_CASE = np.asarray(A__ )
if ignore_case:
_SCREAMING_SNAKE_CASE = np.char.lower(A__ )
_SCREAMING_SNAKE_CASE = np.char.lower(A__ )
if ignore_punctuation:
_SCREAMING_SNAKE_CASE = string.punctuation.maketrans("""""" , """""" , string.punctuation )
_SCREAMING_SNAKE_CASE = np.char.translate(A__ , table=A__ )
_SCREAMING_SNAKE_CASE = np.char.translate(A__ , table=A__ )
if ignore_numbers:
_SCREAMING_SNAKE_CASE = string.digits.maketrans("""""" , """""" , string.digits )
_SCREAMING_SNAKE_CASE = np.char.translate(A__ , table=A__ )
_SCREAMING_SNAKE_CASE = np.char.translate(A__ , table=A__ )
_SCREAMING_SNAKE_CASE = predictions == references
return {"exact_match": np.mean(A__ ) * 1_00}
| 0 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
return "".join([hex(SCREAMING_SNAKE_CASE_ )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE_ )] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> bytes:
"""simple docstring"""
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(SCREAMING_SNAKE_CASE_ ) % 2) != 0:
raise ValueError(
    """Base16 encoded data is invalid:\nData does not have an even number of hex digits.""" )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE_ ) <= set("""0123456789ABCDEF""" ):
raise ValueError(
    """Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.""" )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
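# Per RFC 3548 section 6 the base16 alphabet is the uppercase set 0-9A-F, and every
# byte encodes to exactly two hex digits; hence the even-length and character-set
# checks in the decoder above.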
| 0 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = UniSpeechSatForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = downstream_dict["""projector.weight"""]
_SCREAMING_SNAKE_CASE = downstream_dict["""projector.bias"""]
_SCREAMING_SNAKE_CASE = downstream_dict["""model.post_net.linear.weight"""]
_SCREAMING_SNAKE_CASE = downstream_dict["""model.post_net.linear.bias"""]
return model
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = UniSpeechSatForAudioFrameClassification.from_pretrained(SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = downstream_dict["""model.linear.weight"""]
_SCREAMING_SNAKE_CASE = downstream_dict["""model.linear.bias"""]
return model
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = UniSpeechSatForXVector.from_pretrained(SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = downstream_dict["""connector.weight"""]
_SCREAMING_SNAKE_CASE = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_SCREAMING_SNAKE_CASE = downstream_dict[
F"model.framelevel_feature_extractor.module.{i}.kernel.weight"
]
_SCREAMING_SNAKE_CASE = downstream_dict[F"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
_SCREAMING_SNAKE_CASE = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
_SCREAMING_SNAKE_CASE = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
_SCREAMING_SNAKE_CASE = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
_SCREAMING_SNAKE_CASE = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
_SCREAMING_SNAKE_CASE = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )
_SCREAMING_SNAKE_CASE = checkpoint["""Downstream"""]
_SCREAMING_SNAKE_CASE = UniSpeechSatConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(
SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , do_normalize=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
_SCREAMING_SNAKE_CASE = convert_classification(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif arch.endswith("""ForAudioFrameClassification""" ):
_SCREAMING_SNAKE_CASE = convert_diarization(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif arch.endswith("""ForXVector""" ):
_SCREAMING_SNAKE_CASE = convert_xvector(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
raise NotImplementedError(F"S3PRL weights conversion is not supported for {arch}" )
if hf_config.use_weighted_layer_sum:
_SCREAMING_SNAKE_CASE = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
UpperCamelCase__ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
UpperCamelCase__ : int = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
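# Example invocation (a sketch; the script filename and local paths are assumptions,
# the hub id is real):
#   python convert_unispeech_sat_s3prl.py --base_model_name microsoft/unispeech-sat-base \
#       --config_path config.json --checkpoint_path s3prl_checkpoint.pt --model_dump_path ./out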
| 0 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def lowerCAmelCase_ ( ) -> List[Any]:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def lowerCAmelCase_ ( ) -> int:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def lowerCAmelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
http_head("""https://huggingface.co""" )
| 0 | 1 |
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = []
for line in lines:
_SCREAMING_SNAKE_CASE = re.sub(r"""#.*""" , """""" , SCREAMING_SNAKE_CASE_ ) # remove comments
if line:
filtered_lines.append(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = """\n""".join(SCREAMING_SNAKE_CASE_ )
# Make a hash from all this code
_SCREAMING_SNAKE_CASE = full_str.encode("""utf-8""" )
return shaaaa(SCREAMING_SNAKE_CASE_ ).hexdigest()
# get importable module names and hash for caching
UpperCamelCase__ : List[str] = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
UpperCamelCase__ : str = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
UpperCamelCase__ : Union[str, Any] = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
UpperCamelCase__ : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 0 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCAmelCase_ ( ) -> Iterator[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 2
while True:
if is_prime(SCREAMING_SNAKE_CASE_ ):
yield num
num += 1
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = 2_00_00_00 ) -> int:
"""simple docstring"""
    return sum(takewhile(lambda x: x < SCREAMING_SNAKE_CASE_ , prime_generator() ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 0 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> float:
"""simple docstring"""
return round(float(moles / volume ) * nfactor )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> float:
"""simple docstring"""
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> float:
"""simple docstring"""
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> float:
"""simple docstring"""
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
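# Usage sketch for the ideal-gas helpers above, which solve PV = nRT for
# pressure, volume and temperature with R ~= 0.0821 L*atm/(mol*K). The
# obfuscated names collide (each definition shadows the previous one), so the
# check below re-derives one case inline instead of calling them:
# 1 mol at 300 K in 10 L -> P = (1 * 0.0821 * 300) / 10 ~= 2.46 atm.
assert round((1 * 0.0821 * 300) / 10) == 2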
| 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _a (unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 1_28, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 1_42, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
_SCREAMING_SNAKE_CASE = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 1_28,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 1_42,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(A__ ) , A__ )
def UpperCamelCase ( self ) -> int:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(A__ ) , x.transpose() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(transpose(A__ ) , np.asarray(transpose(A__ ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , np.asarray(transpose(A__ , axes=(1, 2, 0) ) ) ) )
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.reshape(A__ , (4, 3) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.reshape(A__ , (12, 5) ) ) )
@require_torch
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.asarray(reshape(A__ , (4, 3) ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.asarray(reshape(A__ , (12, 5) ) ) ) )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(A__ ) , np.squeeze(A__ ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.squeeze(A__ , axis=2 ) ) )
@require_torch
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(squeeze(A__ ) , np.asarray(squeeze(A__ ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.asarray(squeeze(A__ , axis=2 ) ) ) )
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.expand_dims(A__ , axis=1 ) ) )
@require_torch
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.asarray(expand_dims(A__ , axis=1 ) ) ) )
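# The tests above exercise framework-agnostic tensor helpers. A minimal sketch
# of the dispatch idea (an assumption about the implementation, not the actual
# transformers.utils code): inspect the input type and forward to the backend.
import numpy as _np

def _transpose_sketch(array, axes=None):
    if isinstance(array, _np.ndarray):
        return _np.transpose(array, axes=axes)
    mod = type(array).__module__
    if mod.startswith("torch"):
        # torch tensors expose .permute / .T; jax and tf mirror numpy's API.
        return array.permute(*axes) if axes is not None else array.T
    raise TypeError(f"unsupported type {type(array)}")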
| 0 | 1 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , A__ , A__ = None , A__ = None , A__ = True , A__ = None , A__ = False , A__ = None , A__ = True , A__ = "arrow" , **A__ , ) -> Optional[int]:
super().__init__(
split=A__ , features=A__ , cache_dir=A__ , keep_in_memory=A__ , streaming=A__ , **A__ , )
_SCREAMING_SNAKE_CASE = load_from_cache_file
_SCREAMING_SNAKE_CASE = file_format
_SCREAMING_SNAKE_CASE = Spark(
df=A__ , features=A__ , cache_dir=A__ , working_dir=A__ , **A__ , )
def UpperCamelCase ( self ) -> Optional[int]:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
_SCREAMING_SNAKE_CASE = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=A__ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
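# Hypothetical usage sketch for the reader above (`SparkDatasetReader` is the
# assumed public name of the obfuscated class `_a`; not verified against a
# specific datasets version):
#
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.master("local[*]").getOrCreate()
#   df = spark.createDataFrame([("a", 1), ("b", 2)], ["col_1", "col_2"])
#   ds = SparkDatasetReader(df, split="train").read()
#
# With streaming=True the builder short-circuits to as_streaming_dataset();
# otherwise it materializes the cache (re-downloading unless
# load_from_cache_file is set) and returns a regular Dataset.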
| 0 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ''
SCREAMING_SNAKE_CASE = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , A__ = None , A__ = None , **A__ , ) -> Optional[int]:
super().__init__(self , **A__ )
_SCREAMING_SNAKE_CASE = repo_info
_SCREAMING_SNAKE_CASE = token
_SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self ) -> Tuple:
if self.dir_cache is None:
_SCREAMING_SNAKE_CASE = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_SCREAMING_SNAKE_CASE = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(A__ ): {"""name""": str(A__ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def UpperCamelCase ( self , A__ , A__ = "rb" , **A__ , ) -> Optional[int]:
if not isinstance(self.repo_info , A__ ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
_SCREAMING_SNAKE_CASE = hf_hub_url(self.repo_info.id , A__ , revision=self.repo_info.sha )
return fsspec.open(
A__ , mode=A__ , headers=get_authentication_headers_for_url(A__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def UpperCamelCase ( self , A__ , **A__ ) -> str:
self._get_dirs()
_SCREAMING_SNAKE_CASE = self._strip_protocol(A__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(A__ )
def UpperCamelCase ( self , A__ , A__=False , **A__ ) -> List[Any]:
self._get_dirs()
_SCREAMING_SNAKE_CASE = PurePosixPath(path.strip("""/""" ) )
_SCREAMING_SNAKE_CASE = {}
for p, f in self.dir_cache.items():
_SCREAMING_SNAKE_CASE = PurePosixPath(p.strip("""/""" ) )
_SCREAMING_SNAKE_CASE = p.parent
if root == path:
_SCREAMING_SNAKE_CASE = f
_SCREAMING_SNAKE_CASE = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 0 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _a (unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_SCREAMING_SNAKE_CASE = FlaxDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=A__ , cache_dir=A__ )
_SCREAMING_SNAKE_CASE = [t[-1] for t in os.walk(os.path.join(A__ , os.listdir(A__ )[0] , """snapshots""" ) )]
_SCREAMING_SNAKE_CASE = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(""".bin""" ) for f in files )
@slow
@require_flax
class _a (unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=A__ )
_SCREAMING_SNAKE_CASE = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = jax.device_count()
_SCREAMING_SNAKE_CASE = num_samples * [prompt]
_SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ )
# shard inputs and rng
_SCREAMING_SNAKE_CASE = replicate(A__ )
_SCREAMING_SNAKE_CASE = jax.random.split(A__ , A__ )
_SCREAMING_SNAKE_CASE = shard(A__ )
_SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.151_4745 ) < 1E-3
assert np.abs(np.abs(A__ , dtype=np.floataa ).sum() - 4_9947.875 ) < 5E-1
_SCREAMING_SNAKE_CASE = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(A__ ) == num_samples
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""flax""" , safety_checker=A__ )
_SCREAMING_SNAKE_CASE = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE = 50
_SCREAMING_SNAKE_CASE = jax.device_count()
_SCREAMING_SNAKE_CASE = num_samples * [prompt]
_SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ )
# shard inputs and rng
_SCREAMING_SNAKE_CASE = replicate(A__ )
_SCREAMING_SNAKE_CASE = jax.random.split(A__ , A__ )
_SCREAMING_SNAKE_CASE = shard(A__ )
_SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0565_2401) ) < 1E-3
assert np.abs((np.abs(A__ , dtype=np.floataa ).sum() - 238_3808.2) ) < 5E-1
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=A__ )
_SCREAMING_SNAKE_CASE = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE = 50
_SCREAMING_SNAKE_CASE = jax.device_count()
_SCREAMING_SNAKE_CASE = num_samples * [prompt]
_SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ )
# shard inputs and rng
_SCREAMING_SNAKE_CASE = replicate(A__ )
_SCREAMING_SNAKE_CASE = jax.random.split(A__ , A__ )
_SCREAMING_SNAKE_CASE = shard(A__ )
_SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(A__ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def UpperCamelCase ( self ) -> int:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa )
_SCREAMING_SNAKE_CASE = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE = 50
_SCREAMING_SNAKE_CASE = jax.device_count()
_SCREAMING_SNAKE_CASE = num_samples * [prompt]
_SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ )
# shard inputs and rng
_SCREAMING_SNAKE_CASE = replicate(A__ )
_SCREAMING_SNAKE_CASE = jax.random.split(A__ , A__ )
_SCREAMING_SNAKE_CASE = shard(A__ )
_SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(A__ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = FlaxDDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , set_alpha_to_one=A__ , steps_offset=1 , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , scheduler=A__ , safety_checker=A__ , )
_SCREAMING_SNAKE_CASE = scheduler.create_state()
_SCREAMING_SNAKE_CASE = scheduler_state
_SCREAMING_SNAKE_CASE = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE = 50
_SCREAMING_SNAKE_CASE = jax.device_count()
_SCREAMING_SNAKE_CASE = num_samples * [prompt]
_SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ )
# shard inputs and rng
_SCREAMING_SNAKE_CASE = replicate(A__ )
_SCREAMING_SNAKE_CASE = jax.random.split(A__ , A__ )
_SCREAMING_SNAKE_CASE = shard(A__ )
_SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1E-3
assert np.abs((np.abs(A__ , dtype=np.floataa ).sum() - 234_7693.5) ) < 5E-1
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_SCREAMING_SNAKE_CASE = jax.device_count()
_SCREAMING_SNAKE_CASE = num_samples * [prompt]
_SCREAMING_SNAKE_CASE = jax.random.split(jax.random.PRNGKey(0 ) , A__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=A__ , )
_SCREAMING_SNAKE_CASE = replicate(A__ )
_SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ )
_SCREAMING_SNAKE_CASE = shard(A__ )
_SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , jit=A__ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
_SCREAMING_SNAKE_CASE = images[2, 0, 2_56, 10:17, 1]
# With memory efficient attention
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=A__ , use_memory_efficient_attention=A__ , )
_SCREAMING_SNAKE_CASE = replicate(A__ )
_SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ )
_SCREAMING_SNAKE_CASE = shard(A__ )
_SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , jit=A__ ).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
_SCREAMING_SNAKE_CASE = images[2, 0, 2_56, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
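# The tests above repeat the standard JAX data-parallel recipe: replicate the
# parameters to every device, split the PRNG key per device, shard the batch,
# then run the jitted (pmapped) pipeline. A minimal sketch of the same pattern
# on a toy function (assumes jax/flax are installed; not tied to diffusers):
#
#   import jax, jax.numpy as jnp
#   from flax.jax_utils import replicate
#   from flax.training.common_utils import shard
#
#   n_dev = jax.device_count()
#   params = replicate({"w": jnp.ones(())})   # copy params to every device
#   xs = shard(jnp.arange(n_dev * 4.0))       # split batch across devices
#   ys = jax.pmap(lambda p, x: p["w"] * x)(params, xs)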
| 0 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
_SCREAMING_SNAKE_CASE = (
Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , split=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
if issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = parquet_path
elif issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = [parquet_path]
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=("train",) ) -> List[str]:
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for split in splits:
_SCREAMING_SNAKE_CASE = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
_SCREAMING_SNAKE_CASE = (
Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_SCREAMING_SNAKE_CASE = ParquetDatasetReader({"""train""": parquet_path} , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
if split:
_SCREAMING_SNAKE_CASE = {split: parquet_path}
else:
_SCREAMING_SNAKE_CASE = """train"""
_SCREAMING_SNAKE_CASE = {"""train""": parquet_path, """test""": parquet_path}
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_SCREAMING_SNAKE_CASE = pq.ParquetFile(tmp_path / """foo.parquet""" )
_SCREAMING_SNAKE_CASE = pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = str(shared_datadir / """test_image_rgb.jpg""" )
_SCREAMING_SNAKE_CASE = {"""image""": [image_path]}
_SCREAMING_SNAKE_CASE = Features({"""image""": Image()} )
_SCREAMING_SNAKE_CASE = Dataset.from_dict(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_SCREAMING_SNAKE_CASE = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=SCREAMING_SNAKE_CASE_ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
assert get_writer_batch_size(SCREAMING_SNAKE_CASE_ ) == expected
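# Note on the parametrized test above: get_writer_batch_size returns None for
# plain scalar schemas but a reduced Parquet row-group size when the features
# contain Image or Audio (even nested inside a Sequence), so that readers can
# do random access without decoding huge row groups.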
| 0 | 1 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ''
SCREAMING_SNAKE_CASE = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , A__ = None , A__ = None , **A__ , ) -> Optional[int]:
super().__init__(self , **A__ )
_SCREAMING_SNAKE_CASE = repo_info
_SCREAMING_SNAKE_CASE = token
_SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self ) -> Tuple:
if self.dir_cache is None:
_SCREAMING_SNAKE_CASE = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_SCREAMING_SNAKE_CASE = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(A__ ): {"""name""": str(A__ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def UpperCamelCase ( self , A__ , A__ = "rb" , **A__ , ) -> Optional[int]:
if not isinstance(self.repo_info , A__ ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
_SCREAMING_SNAKE_CASE = hf_hub_url(self.repo_info.id , A__ , revision=self.repo_info.sha )
return fsspec.open(
A__ , mode=A__ , headers=get_authentication_headers_for_url(A__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def UpperCamelCase ( self , A__ , **A__ ) -> str:
self._get_dirs()
_SCREAMING_SNAKE_CASE = self._strip_protocol(A__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(A__ )
def UpperCamelCase ( self , A__ , A__=False , **A__ ) -> List[Any]:
self._get_dirs()
_SCREAMING_SNAKE_CASE = PurePosixPath(path.strip("""/""" ) )
_SCREAMING_SNAKE_CASE = {}
for p, f in self.dir_cache.items():
_SCREAMING_SNAKE_CASE = PurePosixPath(p.strip("""/""" ) )
_SCREAMING_SNAKE_CASE = p.parent
if root == path:
_SCREAMING_SNAKE_CASE = f
_SCREAMING_SNAKE_CASE = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 0 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError("""multiplicative_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""multiplicative_persistence() does not accept negative values""" )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
    while len(num_string ) != 1:
        _SCREAMING_SNAKE_CASE = [int(i ) for i in num_string]
        _SCREAMING_SNAKE_CASE = 1
        for i in range(0 , len(numbers ) ):
            total *= numbers[i]
        _SCREAMING_SNAKE_CASE = str(total )
steps += 1
return steps
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError("""additive_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""additive_persistence() does not accept negative values""" )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
    while len(num_string ) != 1:
        _SCREAMING_SNAKE_CASE = [int(i ) for i in num_string]
        _SCREAMING_SNAKE_CASE = 0
        for i in range(0 , len(numbers ) ):
            total += numbers[i]
        _SCREAMING_SNAKE_CASE = str(total )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
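# Worked example as a self-contained sketch (the obfuscated definitions above
# rebind the same name, so they are not directly callable): 39 -> 3*9=27 ->
# 2*7=14 -> 1*4=4 gives multiplicative persistence 3, while 39 -> 3+9=12 ->
# 1+2=3 gives additive persistence 2.
def _persistence(num: int, multiplicative: bool = True) -> int:
    steps = 0
    while num >= 10:
        digits = [int(d) for d in str(num)]
        num = 1 if multiplicative else 0
        for d in digits:
            num = num * d if multiplicative else num + d
        steps += 1
    return steps

assert _persistence(39) == 3 and _persistence(39, multiplicative=False) == 2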
| 0 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
if not grid or not grid[0]:
raise TypeError("""The grid does not contain the appropriate information""" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
_SCREAMING_SNAKE_CASE = grid[0]
for row_n in range(1 , len(SCREAMING_SNAKE_CASE_ ) ):
_SCREAMING_SNAKE_CASE = grid[row_n]
        _SCREAMING_SNAKE_CASE = fill_row(current_row , previous_row )
_SCREAMING_SNAKE_CASE = grid[row_n]
return grid[-1][-1]
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> list:
"""simple docstring"""
current_row[0] += row_above[0]
for cell_n in range(1 , len(SCREAMING_SNAKE_CASE_ ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
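# Worked example for the DP above: in the grid
#   1 3 1
#   1 5 1
#   4 2 1
# the cheapest right/down path is 1-3-1-1-1 with cost 7. The first row is
# prefix-summed, then each row is filled from the row above. Self-contained
# check of the same recurrence:
def _min_path_sum(grid):
    rows, cols = len(grid), len(grid[0])
    dp = [row[:] for row in grid]
    for c in range(1, cols):
        dp[0][c] += dp[0][c - 1]
    for r in range(1, rows):
        dp[r][0] += dp[r - 1][0]
        for c in range(1, cols):
            dp[r][c] += min(dp[r][c - 1], dp[r - 1][c])
    return dp[-1][-1]

assert _min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7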
| 0 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
UpperCamelCase__ : Tuple = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase__ : Union[str, Any] = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase__ : str = "sshleifer/tiny-mbart"
@require_torch
class _a (_lowerCamelCase):
"""simple docstring"""
def UpperCamelCase ( self , A__=False , A__=None , A__=True , A__=True , A__=True , A__=True , ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=A__ , num_train_epochs=1 , distributed=A__ , extra_args_str=A__ , predict_with_generate=A__ , do_train=A__ , do_eval=A__ , do_predict=A__ , )
_SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history
if not do_eval:
return
_SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()]
_SCREAMING_SNAKE_CASE = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
_SCREAMING_SNAKE_CASE = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , A__ )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def UpperCamelCase ( self ) -> Optional[int]:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def UpperCamelCase ( self ) -> Optional[Any]:
self.run_seqaseq_quick(distributed=A__ )
@require_torch_multi_gpu
def UpperCamelCase ( self ) -> Union[str, Any]:
self.run_seqaseq_quick(distributed=A__ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> Any:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> Tuple:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> str:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=A__ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> List[str]:
self.run_seqaseq_quick(
distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=A__ )
@require_apex
@require_torch_gpu
def UpperCamelCase ( self ) -> Optional[Any]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def UpperCamelCase ( self , A__ ) -> List[Any]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
_SCREAMING_SNAKE_CASE = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
_SCREAMING_SNAKE_CASE = experiments[experiment_id]
_SCREAMING_SNAKE_CASE = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
_SCREAMING_SNAKE_CASE = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**A__ , extra_args_str=data["""extra_args_str"""] )
_SCREAMING_SNAKE_CASE = len(re.findall(A__ , cl.err ) )
self.assertEqual(A__ , data["""n_matches"""] )
@slow
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=10 , distributed=A__ , )
# Check metrics
_SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history
_SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()]
_SCREAMING_SNAKE_CASE = eval_metrics[0]
_SCREAMING_SNAKE_CASE = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , A__ )
# test if do_predict saves generations and metrics
_SCREAMING_SNAKE_CASE = os.listdir(A__ )
_SCREAMING_SNAKE_CASE = {os.path.basename(A__ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def UpperCamelCase ( self ) -> Dict:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(A__ ) -> Tuple[int, float]:
_SCREAMING_SNAKE_CASE = """--skip_memory_metrics 0"""
_SCREAMING_SNAKE_CASE = self.run_trainer(
max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=1 , optim=A__ , distributed=A__ , extra_args_str=A__ , do_eval=A__ , do_predict=A__ , n_gpus_to_use=1 , )
# Check metrics
_SCREAMING_SNAKE_CASE = TrainerState.load_from_json(Path(A__ , """trainer_state.json""" ) ).log_history
_SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
_SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
_SCREAMING_SNAKE_CASE = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
_SCREAMING_SNAKE_CASE = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
_SCREAMING_SNAKE_CASE = gpu_peak_mem_orig + gpu_alloc_mem_orig
_SCREAMING_SNAKE_CASE = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
_SCREAMING_SNAKE_CASE = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding`, which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
_SCREAMING_SNAKE_CASE = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
A__ , A__ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
A__ , A__ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
A__ , A__ , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ = 3E-3 , A__ = "adafactor" , A__ = False , A__ = None , A__ = 0 , A__ = True , A__ = True , A__ = True , A__ = True , A__ = None , ) -> Dict:
_SCREAMING_SNAKE_CASE = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
_SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE = F"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A__ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(A__ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
_SCREAMING_SNAKE_CASE = F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A__ )}\n ".split()
_SCREAMING_SNAKE_CASE = """
--do_predict
""".split()
_SCREAMING_SNAKE_CASE = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
_SCREAMING_SNAKE_CASE = get_gpu_count()
_SCREAMING_SNAKE_CASE = get_torch_dist_unique_port()
_SCREAMING_SNAKE_CASE = F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
_SCREAMING_SNAKE_CASE = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(A__ , env=self.get_env() )
else:
_SCREAMING_SNAKE_CASE = ["""run_translation.py"""] + args
with patch.object(A__ , """argv""" , A__ ):
main()
return output_dir
| 0 | 1 |
'''simple docstring'''
UpperCamelCase__ : Dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
UpperCamelCase__ : str = {value: key for key, value in encode_dict.items()}
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """"""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("""encode() accepts only letters of the alphabet and spaces""" )
return encoded
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
if set(SCREAMING_SNAKE_CASE_ ) - {"A", "B", " "} != set():
raise Exception("""decode() accepts only 'A', 'B' and spaces""" )
_SCREAMING_SNAKE_CASE = """"""
for word in coded.split():
        while len(word ) != 0:
decoded += decode_dict[word[:5]]
_SCREAMING_SNAKE_CASE = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
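# Round-trip sketch using the tables above (note the mapping is internally
# consistent but differs from the classical Baconian cipher for a few letters,
# e.g. "j" and "v"):
#
#   encode("hello") -> "AABBBAABAAABABAABABAABBAB"
#   decode("AABBBAABAAABABAABABAABBAB") -> "hello"
#
# The obfuscated function names collide, so the calls are shown as comments
# with the intended names `encode`/`decode`.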
| 0 |
'''simple docstring'''
import sys
UpperCamelCase__ : int = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = N ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = -sys.maxsize - 1
for i in range(len(SCREAMING_SNAKE_CASE_ ) - 12 ):
_SCREAMING_SNAKE_CASE = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
_SCREAMING_SNAKE_CASE = product
return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 0 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 0
while number > 0:
_SCREAMING_SNAKE_CASE = number % 10
sum_of_digits += last_digit
_SCREAMING_SNAKE_CASE = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = 1_00 ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = factorial(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = split_and_add(SCREAMING_SNAKE_CASE_ )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
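# Quick check: 10! = 3628800 and 3+6+2+8+8+0+0 = 27, so a working version of
# this pipeline returns 27 for input 10; for n = 100 the result is 648 (the
# known Project Euler 20 answer).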
| 0 |
'''simple docstring'''
UpperCamelCase__ : Dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
UpperCamelCase__ : str = {value: key for key, value in encode_dict.items()}
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """"""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("""encode() accepts only letters of the alphabet and spaces""" )
return encoded
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
if set(SCREAMING_SNAKE_CASE_ ) - {"A", "B", " "} != set():
raise Exception("""decode() accepts only 'A', 'B' and spaces""" )
_SCREAMING_SNAKE_CASE = """"""
for word in coded.split():
        while len(word ) != 0:
decoded += decode_dict[word[:5]]
_SCREAMING_SNAKE_CASE = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 0 | 1 |
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=10_00 ) -> bool:
"""simple docstring"""
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
_SCREAMING_SNAKE_CASE = n - 1
_SCREAMING_SNAKE_CASE = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
_SCREAMING_SNAKE_CASE = 0
while count < prec:
_SCREAMING_SNAKE_CASE = random.randint(2 , n - 1 )
_SCREAMING_SNAKE_CASE = bin_exp_mod(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if b != 1:
_SCREAMING_SNAKE_CASE = True
for _ in range(SCREAMING_SNAKE_CASE_ ):
if b == n - 1:
_SCREAMING_SNAKE_CASE = False
break
_SCREAMING_SNAKE_CASE = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
UpperCamelCase__ : Any = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = emb.weight.shape
_SCREAMING_SNAKE_CASE = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = emb.weight.data
return lin_layer
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )
_SCREAMING_SNAKE_CASE = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
_SCREAMING_SNAKE_CASE = mam_aaa["""model"""]
remove_ignore_keys_(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = state_dict["""encoder.embed_tokens.weight"""].shape[0]
_SCREAMING_SNAKE_CASE = MaMaaaConfig(
vocab_size=SCREAMING_SNAKE_CASE_ , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
_SCREAMING_SNAKE_CASE = state_dict["""decoder.embed_tokens.weight"""]
_SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
model.model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
UpperCamelCase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
UpperCamelCase__ : List[str] = parser.parse_args()
UpperCamelCase__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 0 | 1 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = None
if token is not None:
_SCREAMING_SNAKE_CASE = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
_SCREAMING_SNAKE_CASE = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
_SCREAMING_SNAKE_CASE = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json()
_SCREAMING_SNAKE_CASE = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_SCREAMING_SNAKE_CASE = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = requests.get(url + F"&page={i + 2}" , headers=SCREAMING_SNAKE_CASE_ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = None
if token is not None:
_SCREAMING_SNAKE_CASE = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
_SCREAMING_SNAKE_CASE = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
_SCREAMING_SNAKE_CASE = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json()
_SCREAMING_SNAKE_CASE = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_SCREAMING_SNAKE_CASE = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = requests.get(url + F"&page={i + 2}" , headers=SCREAMING_SNAKE_CASE_ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = None
if token is not None:
_SCREAMING_SNAKE_CASE = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
_SCREAMING_SNAKE_CASE = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ , allow_redirects=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = result.headers["""Location"""]
_SCREAMING_SNAKE_CASE = requests.get(SCREAMING_SNAKE_CASE_ , allow_redirects=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = os.path.join(SCREAMING_SNAKE_CASE_ , F"{artifact_name}.zip" )
with open(SCREAMING_SNAKE_CASE_ , """wb""" ) as fp:
fp.write(response.content )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = None
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ) as z:
for filename in z.namelist():
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(SCREAMING_SNAKE_CASE_ ) as f:
for line in f:
_SCREAMING_SNAKE_CASE = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_SCREAMING_SNAKE_CASE = line[: line.index(""": """ )]
_SCREAMING_SNAKE_CASE = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_SCREAMING_SNAKE_CASE = line[len("""FAILED """ ) :]
failed_tests.append(SCREAMING_SNAKE_CASE_ )
elif filename == "job_name.txt":
_SCREAMING_SNAKE_CASE = line
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F"`errors` and `failed_tests` should have the same number of elements. Got {len(SCREAMING_SNAKE_CASE_ )} for `errors` "
F"and {len(SCREAMING_SNAKE_CASE_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
_SCREAMING_SNAKE_CASE = None
if job_name and job_links:
_SCREAMING_SNAKE_CASE = job_links.get(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# A list with elements of the form (line of error, error, failed test)
_SCREAMING_SNAKE_CASE = [x + [y] + [job_link] for x, y in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
return result
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for p in os.listdir(SCREAMING_SNAKE_CASE_ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(SCREAMING_SNAKE_CASE_ , job_links=SCREAMING_SNAKE_CASE_ ) )
return errors
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = Counter()
counter.update([x[1] for x in logs] )
_SCREAMING_SNAKE_CASE = counter.most_common()
_SCREAMING_SNAKE_CASE = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_SCREAMING_SNAKE_CASE = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
_SCREAMING_SNAKE_CASE = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=SCREAMING_SNAKE_CASE_ ) )
return r
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_SCREAMING_SNAKE_CASE = test.split("""/""" )[2]
else:
_SCREAMING_SNAKE_CASE = None
return test
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [(x[0], x[1], get_model(x[2] )) for x in logs]
_SCREAMING_SNAKE_CASE = [x for x in logs if x[2] is not None]
_SCREAMING_SNAKE_CASE = {x[2] for x in logs}
_SCREAMING_SNAKE_CASE = {}
for test in tests:
_SCREAMING_SNAKE_CASE = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_SCREAMING_SNAKE_CASE = counter.most_common()
_SCREAMING_SNAKE_CASE = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_SCREAMING_SNAKE_CASE = sum(error_counts.values() )
if n_errors > 0:
_SCREAMING_SNAKE_CASE = {"""count""": n_errors, """errors""": error_counts}
_SCREAMING_SNAKE_CASE = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=SCREAMING_SNAKE_CASE_ ) )
return r
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """| no. | error | status |"""
_SCREAMING_SNAKE_CASE = """|-:|:-|:-|"""
_SCREAMING_SNAKE_CASE = [header, sep]
for error in reduced_by_error:
_SCREAMING_SNAKE_CASE = reduced_by_error[error]["""count"""]
_SCREAMING_SNAKE_CASE = F"| {count} | {error[:1_00]} | |"
lines.append(SCREAMING_SNAKE_CASE_ )
return "\n".join(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """| model | no. of errors | major error | count |"""
_SCREAMING_SNAKE_CASE = """|-:|-:|-:|-:|"""
_SCREAMING_SNAKE_CASE = [header, sep]
for model in reduced_by_model:
_SCREAMING_SNAKE_CASE = reduced_by_model[model]["""count"""]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = list(reduced_by_model[model]["""errors"""].items() )[0]
_SCREAMING_SNAKE_CASE = F"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(SCREAMING_SNAKE_CASE_ )
return "\n".join(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
UpperCamelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
UpperCamelCase__ : str = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
UpperCamelCase__ : List[Any] = get_job_links(args.workflow_run_id, token=args.token)
UpperCamelCase__ : List[str] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
UpperCamelCase__ : Dict = k.find(" / ")
UpperCamelCase__ : Optional[Any] = k[index + len(" / ") :]
UpperCamelCase__ : List[str] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
UpperCamelCase__ : List[str] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
UpperCamelCase__ : List[str] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
UpperCamelCase__ : str = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
UpperCamelCase__ : List[str] = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
UpperCamelCase__ : Optional[Any] = reduce_by_error(errors)
UpperCamelCase__ : Optional[int] = reduce_by_model(errors)
UpperCamelCase__ : Optional[int] = make_github_table(reduced_by_error)
UpperCamelCase__ : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase__ : str = {
"configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
"tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = [
"CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
"CanineForMultipleChoice",
"CanineForQuestionAnswering",
"CanineForSequenceClassification",
"CanineForTokenClassification",
"CanineLayer",
"CanineModel",
"CaninePreTrainedModel",
"load_tf_weights_in_canine",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 | 1 |
'''simple docstring'''
UpperCamelCase__ : Optional[int] = "Alexander Joslin"
import operator as op
from .stack import Stack
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
_SCREAMING_SNAKE_CASE = Stack()
_SCREAMING_SNAKE_CASE = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(SCREAMING_SNAKE_CASE_ ) )
elif i in operators:
# RULE 2
operator_stack.push(SCREAMING_SNAKE_CASE_ )
elif i == ")":
# RULE 4
_SCREAMING_SNAKE_CASE = operator_stack.peek()
operator_stack.pop()
_SCREAMING_SNAKE_CASE = operand_stack.peek()
operand_stack.pop()
_SCREAMING_SNAKE_CASE = operand_stack.peek()
operand_stack.pop()
_SCREAMING_SNAKE_CASE = operators[opr](SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
operand_stack.push(SCREAMING_SNAKE_CASE_ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
UpperCamelCase__ : List[Any] = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE = 'ChineseCLIPImageProcessor'
SCREAMING_SNAKE_CASE = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self , A__=None , A__=None , **A__ ) -> int:
_SCREAMING_SNAKE_CASE = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , A__ , )
_SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""" )
_SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(A__ , A__ )
_SCREAMING_SNAKE_CASE = self.image_processor
def __call__( self , A__=None , A__=None , A__=None , **A__ ) -> Optional[int]:
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_SCREAMING_SNAKE_CASE = self.tokenizer(A__ , return_tensors=A__ , **A__ )
if images is not None:
_SCREAMING_SNAKE_CASE = self.image_processor(A__ , return_tensors=A__ , **A__ )
if text is not None and images is not None:
_SCREAMING_SNAKE_CASE = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A__ ) , tensor_type=A__ )
def UpperCamelCase ( self , *A__ , **A__ ) -> Dict:
return self.tokenizer.batch_decode(*A__ , **A__ )
def UpperCamelCase ( self , *A__ , **A__ ) -> Optional[Any]:
return self.tokenizer.decode(*A__ , **A__ )
@property
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase ( self ) -> Optional[int]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , A__ , )
return self.image_processor_class
| 0 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_lowerCamelCase)
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = field(default='language-modeling' , metadata={'include_in_asdict_even_if_is_default': True})
SCREAMING_SNAKE_CASE = Features({'text': Value('string')})
SCREAMING_SNAKE_CASE = Features({})
SCREAMING_SNAKE_CASE = "text"
@property
def UpperCamelCase ( self ) -> Dict[str, str]:
return {self.text_column: "text"}
| 0 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCamelCase__ : List[str] = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
UpperCamelCase__ : List[Any] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
UpperCamelCase__ : Any = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _a (datasets.Metric):
"""simple docstring"""
def UpperCamelCase ( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def UpperCamelCase ( self , A__ , A__ , A__=None ) -> List[str]:
return {
"matthews_correlation": float(matthews_corrcoef(A__ , A__ , sample_weight=A__ ) ),
}
| 0 | 1 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 0 |
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
"""simple docstring"""
print(F"Vertex\tShortest Distance from vertex {src}" )
for i, d in enumerate(SCREAMING_SNAKE_CASE_ ):
print(F"{i}\t\t{d}" )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
for j in range(SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
return True
return False
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> list[float]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [float("""inf""" )] * vertex_count
_SCREAMING_SNAKE_CASE = 0.0
for _ in range(vertex_count - 1 ):
for j in range(SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
_SCREAMING_SNAKE_CASE = distance[u] + w
_SCREAMING_SNAKE_CASE = check_negative_cycle(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if negative_cycle_exists:
raise Exception("""Negative cycle found""" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ : int = int(input("Enter number of vertices: ").strip())
UpperCamelCase__ : int = int(input("Enter number of edges: ").strip())
UpperCamelCase__ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("Edge ", i + 1)
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Dict = (
int(x)
for x in input("Enter source, destination, weight: ").strip().split(" ")
)
UpperCamelCase__ : Optional[Any] = {"src": src, "dst": dest, "weight": weight}
UpperCamelCase__ : Optional[Any] = int(input("\nEnter shortest path source:").strip())
UpperCamelCase__ : Any = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 0 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_SCREAMING_SNAKE_CASE = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> int:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_SCREAMING_SNAKE_CASE = """"""
else:
_SCREAMING_SNAKE_CASE = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_SCREAMING_SNAKE_CASE = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
_SCREAMING_SNAKE_CASE = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_SCREAMING_SNAKE_CASE = in_proj_weight[
: config.hidden_size, :
]
_SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size]
_SCREAMING_SNAKE_CASE = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_SCREAMING_SNAKE_CASE = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_SCREAMING_SNAKE_CASE = in_proj_weight[
-config.hidden_size :, :
]
_SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
# projection head is used in the self-supervised pre-training in MSN,
# for downstream task it's not needed.
_SCREAMING_SNAKE_CASE = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = dct.pop(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = val
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ViTMSNConfig()
_SCREAMING_SNAKE_CASE = 10_00
_SCREAMING_SNAKE_CASE = """datasets/huggingface/label-files"""
_SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json"""
_SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , """r""" ) )
_SCREAMING_SNAKE_CASE = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE = idalabel
_SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
_SCREAMING_SNAKE_CASE = 3_84
_SCREAMING_SNAKE_CASE = 15_36
_SCREAMING_SNAKE_CASE = 6
elif "l16" in checkpoint_url:
_SCREAMING_SNAKE_CASE = 10_24
_SCREAMING_SNAKE_CASE = 40_96
_SCREAMING_SNAKE_CASE = 24
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 0.1
elif "b4" in checkpoint_url:
_SCREAMING_SNAKE_CASE = 4
elif "l7" in checkpoint_url:
_SCREAMING_SNAKE_CASE = 7
_SCREAMING_SNAKE_CASE = 10_24
_SCREAMING_SNAKE_CASE = 40_96
_SCREAMING_SNAKE_CASE = 24
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 0.1
_SCREAMING_SNAKE_CASE = ViTMSNModel(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""target_encoder"""]
_SCREAMING_SNAKE_CASE = ViTImageProcessor(size=config.image_size )
remove_projection_head(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = create_rename_keys(SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
model.eval()
_SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_SCREAMING_SNAKE_CASE = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
_SCREAMING_SNAKE_CASE = ViTImageProcessor(
size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
_SCREAMING_SNAKE_CASE = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
_SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
_SCREAMING_SNAKE_CASE = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
_SCREAMING_SNAKE_CASE = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
_SCREAMING_SNAKE_CASE = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
_SCREAMING_SNAKE_CASE = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
_SCREAMING_SNAKE_CASE = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
UpperCamelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCamelCase__ : List[Any] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class _a :
"""simple docstring"""
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=5_12 , A__=16 , A__=2 , A__=0.02 , A__=3 , A__=4 , A__=None , ) -> int:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = 13
_SCREAMING_SNAKE_CASE = 7
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = 99
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = 37
_SCREAMING_SNAKE_CASE = """gelu"""
_SCREAMING_SNAKE_CASE = 0.1
_SCREAMING_SNAKE_CASE = 0.1
_SCREAMING_SNAKE_CASE = 5_12
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 0.02
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = TFRoFormerModel(config=A__ )
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE = model(A__ )
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> str:
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = TFRoFormerForCausalLM(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )["""logits"""]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Dict:
_SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFRoFormerForSequenceClassification(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Any:
_SCREAMING_SNAKE_CASE = self.num_choices
_SCREAMING_SNAKE_CASE = TFRoFormerForMultipleChoice(config=A__ )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFRoFormerForTokenClassification(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Tuple:
_SCREAMING_SNAKE_CASE = TFRoFormerForQuestionAnswering(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class _a (_lowerCamelCase , _lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ ) -> str:
if pipeline_test_case_name == "TextGenerationPipelineTests":
return True
return False
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = TFRoFormerModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ , hidden_size=37 )
def UpperCamelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A__ )
def UpperCamelCase ( self ) -> int:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A__ )
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A__ )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A__ )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A__ )
@slow
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" )
self.assertIsNotNone(A__ )
@require_tf
class _a (unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE = model(A__ )[0]
# TODO Replace vocab size
_SCREAMING_SNAKE_CASE = 5_00_00
_SCREAMING_SNAKE_CASE = [1, 6, vocab_size]
self.assertEqual(output.shape , A__ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
_SCREAMING_SNAKE_CASE = tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A__ , atol=1E-4 )
@require_tf
class _a (unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 1E-4
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = tf.constant([[4, 10]] )
_SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
_SCREAMING_SNAKE_CASE = emba(input_ids.shape )
_SCREAMING_SNAKE_CASE = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(A__ , A__ , atol=self.tolerance )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
_SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 )
emba([2, 16, 5_12] )
_SCREAMING_SNAKE_CASE = emba.weight[:3, :5]
tf.debugging.assert_near(A__ , A__ , atol=self.tolerance )
@require_tf
class _a (unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 1E-4
def UpperCamelCase ( self ) -> int:
# 2,12,16,64
_SCREAMING_SNAKE_CASE = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
_SCREAMING_SNAKE_CASE = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
_SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
_SCREAMING_SNAKE_CASE = embed_positions([2, 16, 7_68] )[None, None, :, :]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A__ , A__ , A__ )
_SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
_SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A__ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A__ , atol=self.tolerance )
| 0 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ : Optional[Any] = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : str = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
UpperCamelCase__ : int = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase__ : str = logging.get_logger(__name__)
UpperCamelCase__ : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase__ : Dict = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
UpperCamelCase__ : List[Any] = {
"gpt-neox-20b": 2_048,
}
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
def __init__( self , A__=None , A__=None , A__=None , A__="<|endoftext|>" , A__="<|endoftext|>" , A__="<|endoftext|>" , A__=False , **A__ , ) -> List[Any]:
super().__init__(
A__ , A__ , tokenizer_file=A__ , unk_token=A__ , bos_token=A__ , eos_token=A__ , add_prefix_space=A__ , **A__ , )
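# Keep the backend pre-tokenizer's `add_prefix_space` setting in sync with the
# flag passed to this tokenizer.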
_SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , A__ ) != add_prefix_space:
_SCREAMING_SNAKE_CASE = getattr(A__ , pre_tok_state.pop("""type""" ) )
_SCREAMING_SNAKE_CASE = add_prefix_space
_SCREAMING_SNAKE_CASE = pre_tok_class(**A__ )
_SCREAMING_SNAKE_CASE = add_prefix_space
def UpperCamelCase ( self , A__ , A__ = None ) -> Tuple[str]:
_SCREAMING_SNAKE_CASE = self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
def UpperCamelCase ( self , A__ ) -> List[int]:
_SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(A__ , add_special_tokens=A__ ) + [self.eos_token_id] )
if len(A__ ) > self.model_max_length:
_SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
return input_ids
| 0 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = XCLIPTextConfig()
# derive patch size from model name
_SCREAMING_SNAKE_CASE = model_name.find("""patch""" )
_SCREAMING_SNAKE_CASE = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
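# e.g. "xclip-base-patch32" -> patch size 32, "xclip-base-patch16" -> 16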
_SCREAMING_SNAKE_CASE = XCLIPVisionConfig(patch_size=SCREAMING_SNAKE_CASE_ , num_frames=SCREAMING_SNAKE_CASE_ )
if "large" in model_name:
_SCREAMING_SNAKE_CASE = 7_68
_SCREAMING_SNAKE_CASE = 30_72
_SCREAMING_SNAKE_CASE = 12
_SCREAMING_SNAKE_CASE = 10_24
_SCREAMING_SNAKE_CASE = 40_96
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 24
_SCREAMING_SNAKE_CASE = 7_68
_SCREAMING_SNAKE_CASE = 30_72
if model_name == "xclip-large-patch14-16-frames":
_SCREAMING_SNAKE_CASE = 3_36
_SCREAMING_SNAKE_CASE = XCLIPConfig.from_text_vision_configs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if "large" in model_name:
_SCREAMING_SNAKE_CASE = 7_68
return config
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
# text encoder
if name == "token_embedding.weight":
_SCREAMING_SNAKE_CASE = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
_SCREAMING_SNAKE_CASE = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
_SCREAMING_SNAKE_CASE = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
_SCREAMING_SNAKE_CASE = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
_SCREAMING_SNAKE_CASE = name.replace("""c_proj""" , """fc2""" )
if name.startswith("""transformer.resblocks""" ):
_SCREAMING_SNAKE_CASE = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
_SCREAMING_SNAKE_CASE = name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
if "ln_final" in name:
_SCREAMING_SNAKE_CASE = name.replace("""ln_final""" , """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
_SCREAMING_SNAKE_CASE = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
if "visual.conv1" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
if "visual.proj" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.proj""" , """visual_projection.weight""" )
if "text_projection" in name:
_SCREAMING_SNAKE_CASE = name.replace("""text_projection""" , """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
_SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
_SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""positional""" , """position""" )
if name.startswith("""mit.resblocks""" ):
_SCREAMING_SNAKE_CASE = name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
_SCREAMING_SNAKE_CASE = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
return name
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_SCREAMING_SNAKE_CASE = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "attn.in_proj" in key:
_SCREAMING_SNAKE_CASE = key.split(""".""" )
if key.startswith("""visual""" ):
_SCREAMING_SNAKE_CASE = key_split[3]
_SCREAMING_SNAKE_CASE = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[
:dim, :
]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2, :
]
_SCREAMING_SNAKE_CASE = val[
-dim:, :
]
else:
_SCREAMING_SNAKE_CASE = val[
:dim
]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2
]
_SCREAMING_SNAKE_CASE = val[
-dim:
]
else:
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[
:dim, :
]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2, :
]
_SCREAMING_SNAKE_CASE = val[
-dim:, :
]
else:
_SCREAMING_SNAKE_CASE = val[:dim]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2
]
_SCREAMING_SNAKE_CASE = val[-dim:]
elif key.startswith("""mit""" ):
_SCREAMING_SNAKE_CASE = key_split[2]
_SCREAMING_SNAKE_CASE = config.vision_config.mit_hidden_size
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[:dim, :]
_SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
_SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
_SCREAMING_SNAKE_CASE = val[:dim]
_SCREAMING_SNAKE_CASE = val[dim : dim * 2]
_SCREAMING_SNAKE_CASE = val[-dim:]
else:
_SCREAMING_SNAKE_CASE = key_split[2]
_SCREAMING_SNAKE_CASE = config.text_config.hidden_size
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[:dim, :]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2, :
]
_SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
_SCREAMING_SNAKE_CASE = val[:dim]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2
]
_SCREAMING_SNAKE_CASE = val[-dim:]
else:
_SCREAMING_SNAKE_CASE = rename_key(SCREAMING_SNAKE_CASE_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
_SCREAMING_SNAKE_CASE = val.T
_SCREAMING_SNAKE_CASE = val
return orig_state_dict
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
if num_frames == 8:
_SCREAMING_SNAKE_CASE = """eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
_SCREAMING_SNAKE_CASE = """eating_spaghetti.npy"""
elif num_frames == 32:
_SCREAMING_SNAKE_CASE = """eating_spaghetti_32_frames.npy"""
_SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" , )
_SCREAMING_SNAKE_CASE = np.load(SCREAMING_SNAKE_CASE_ )
return list(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
_SCREAMING_SNAKE_CASE = model_to_url[model_name]
_SCREAMING_SNAKE_CASE = 8
if "16-frames" in model_name:
_SCREAMING_SNAKE_CASE = 16
elif "shot" in model_name:
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = get_xclip_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ )
model.eval()
if "drive" in checkpoint_url:
_SCREAMING_SNAKE_CASE = """pytorch_model.bin"""
gdown.cached_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""model"""]
else:
_SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ )["""model"""]
_SCREAMING_SNAKE_CASE = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
_SCREAMING_SNAKE_CASE = 3_36 if model_name == """xclip-large-patch14-16-frames""" else 2_24
_SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(size=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
_SCREAMING_SNAKE_CASE = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
_SCREAMING_SNAKE_CASE = XCLIPProcessor(image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = prepare_video(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE_ )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
# Verify outputs
_SCREAMING_SNAKE_CASE = outputs.logits_per_video
_SCREAMING_SNAKE_CASE = logits_per_video.softmax(dim=1 )
print("""Probs:""" , SCREAMING_SNAKE_CASE_ )
# kinetics-400
if model_name == "xclip-base-patch32":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F"Model name {model_name} not supported" )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
slow_tokenizer.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase__ : str = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 0 | 1 |
'''simple docstring'''
from __future__ import annotations
class _a :
"""simple docstring"""
def __init__( self , A__ = 0 ) -> Tuple:
_SCREAMING_SNAKE_CASE = key
def UpperCamelCase ( self , A__ , A__ ) -> list[str]:
assert isinstance(A__ , A__ ) and isinstance(A__ , A__ )
_SCREAMING_SNAKE_CASE = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
        return [chr(ord(ch ) ^ key ) for ch in content]
def UpperCamelCase ( self , A__ , A__ ) -> list[str]:
assert isinstance(A__ , A__ ) and isinstance(A__ , A__ )
_SCREAMING_SNAKE_CASE = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
        return [chr(ord(ch ) ^ key ) for ch in content]
def UpperCamelCase ( self , A__ , A__ = 0 ) -> str:
assert isinstance(A__ , A__ ) and isinstance(A__ , A__ )
_SCREAMING_SNAKE_CASE = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
_SCREAMING_SNAKE_CASE = """"""
for ch in content:
            ans += chr(ord(ch ) ^ key )
return ans
def UpperCamelCase ( self , A__ , A__ = 0 ) -> str:
assert isinstance(A__ , A__ ) and isinstance(A__ , A__ )
_SCREAMING_SNAKE_CASE = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
_SCREAMING_SNAKE_CASE = """"""
for ch in content:
            ans += chr(ord(ch ) ^ key )
return ans
def UpperCamelCase ( self , A__ , A__ = 0 ) -> bool:
assert isinstance(A__ , A__ ) and isinstance(A__ , A__ )
try:
with open(A__ ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
                    fout.write(self.encrypt_string(line , key ) )
except OSError:
return False
return True
def UpperCamelCase ( self , A__ , A__ ) -> bool:
assert isinstance(A__ , A__ ) and isinstance(A__ , A__ )
try:
with open(A__ ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , A__ , A__ ) -> Any:
_SCREAMING_SNAKE_CASE = params
_SCREAMING_SNAKE_CASE = np.array(A__ )
        _SCREAMING_SNAKE_CASE = np.array([len(t ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , A__ ) -> Dict:
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> Tuple:
return len(self.lengths )
def UpperCamelCase ( self ) -> Dict:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.params.max_model_input_size
_SCREAMING_SNAKE_CASE = self.lengths > max_len
logger.info(F"Splitting {sum(A__ )} too long sequences." )
def divide_chunks(A__ , A__ ):
            return [l[i : i + n] for i in range(0 , len(l ) , n )]
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
if self.params.mlm:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
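        # split over-long sequences into chunks, re-adding the special tokens at every chunk boundary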
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
_SCREAMING_SNAKE_CASE = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
                        _SCREAMING_SNAKE_CASE = np.insert(sub_s , 0 , cls_id )
                    if sub_s[-1] != sep_id:
                        _SCREAMING_SNAKE_CASE = np.insert(sub_s , len(sub_s ) , sep_id )
                    assert len(sub_s ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s )
                new_tok_ids.extend(sub_seqs )
                new_lengths.extend([len(l ) for l in sub_seqs] )
        _SCREAMING_SNAKE_CASE = np.array(new_tok_ids )
        _SCREAMING_SNAKE_CASE = np.array(new_lengths )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = len(self )
_SCREAMING_SNAKE_CASE = self.lengths > 11
_SCREAMING_SNAKE_CASE = self.token_ids[indices]
_SCREAMING_SNAKE_CASE = self.lengths[indices]
_SCREAMING_SNAKE_CASE = len(self )
logger.info(F"Remove {init_size - new_size} too short (<=11 tokens) sequences." )
def UpperCamelCase ( self ) -> int:
if "unk_token" not in self.params.special_tok_ids:
return
else:
_SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""]
_SCREAMING_SNAKE_CASE = len(self )
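        # count unknown tokens per sequence; sequences whose unknown ratio reaches 50% are dropped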
_SCREAMING_SNAKE_CASE = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
_SCREAMING_SNAKE_CASE = (unk_occs / self.lengths) < 0.5
_SCREAMING_SNAKE_CASE = self.token_ids[indices]
_SCREAMING_SNAKE_CASE = self.lengths[indices]
_SCREAMING_SNAKE_CASE = len(self )
logger.info(F"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." )
def UpperCamelCase ( self ) -> Optional[Any]:
if not self.params.is_master:
return
logger.info(F"{len(self )} sequences" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def UpperCamelCase ( self , A__ ) -> Any:
_SCREAMING_SNAKE_CASE = [t[0] for t in batch]
_SCREAMING_SNAKE_CASE = [t[1] for t in batch]
assert len(A__ ) == len(A__ )
# Max for paddings
_SCREAMING_SNAKE_CASE = max(A__ )
# Pad token ids
if self.params.mlm:
_SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""pad_token"""]
else:
_SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""]
        _SCREAMING_SNAKE_CASE = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
        assert len(tk_ ) == len(token_ids )
        assert all(len(t ) == max_seq_len_ for t in tk_ )
_SCREAMING_SNAKE_CASE = torch.tensor(tk_ ) # (bs, max_seq_len_)
_SCREAMING_SNAKE_CASE = torch.tensor(A__ ) # (bs)
return tk_t, lg_t
| 0 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _a (_lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = CTRLTokenizer
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self ) -> Optional[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_SCREAMING_SNAKE_CASE = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
_SCREAMING_SNAKE_CASE = dict(zip(A__ , range(len(A__ ) ) ) )
_SCREAMING_SNAKE_CASE = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
_SCREAMING_SNAKE_CASE = {"""unk_token""": """<unk>"""}
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(A__ ) )
def UpperCamelCase ( self , **A__ ) -> Any:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **A__ )
def UpperCamelCase ( self , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = """adapt react readapt apt"""
_SCREAMING_SNAKE_CASE = """adapt react readapt apt"""
return input_text, output_text
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_SCREAMING_SNAKE_CASE = """adapt react readapt apt"""
_SCREAMING_SNAKE_CASE = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token]
_SCREAMING_SNAKE_CASE = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ )
| 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : Any = "▁"
UpperCamelCase__ : Any = {"vocab_file": "spiece.model"}
UpperCamelCase__ : int = {
"vocab_file": {
"google/reformer-crime-and-punishment": (
"https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
)
}
}
UpperCamelCase__ : Optional[int] = {
"google/reformer-crime-and-punishment": 524_288,
}
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
def __init__( self , A__ , A__="</s>" , A__="<unk>" , A__=[] , A__ = None , **A__ , ) -> None:
_SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=A__ , unk_token=A__ , additional_special_tokens=A__ , sp_model_kwargs=self.sp_model_kwargs , **A__ , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A__ )
@property
def UpperCamelCase ( self ) -> Any:
return self.sp_model.get_piece_size()
def UpperCamelCase ( self ) -> Dict[str, int]:
        _SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> int:
_SCREAMING_SNAKE_CASE = self.__dict__.copy()
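        # drop the SentencePiece processor so the tokenizer can be pickled; __setstate__ reloads it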
_SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self , A__ ) -> str:
_SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase ( self , A__ ) -> List[str]:
        return self.sp_model.encode(A__ , out_type=str )
def UpperCamelCase ( self , A__ ) -> Union[str, Any]:
return self.sp_model.piece_to_id(A__ )
def UpperCamelCase ( self , A__ ) -> List[Any]:
if index < self.sp_model.get_piece_size():
_SCREAMING_SNAKE_CASE = self.sp_model.IdToPiece(A__ )
return token
def UpperCamelCase ( self , A__ ) -> str:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                _SCREAMING_SNAKE_CASE = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def UpperCamelCase ( self , A__ , A__ = None ) -> Tuple[str]:
if not os.path.isdir(A__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_SCREAMING_SNAKE_CASE = os.path.join(
A__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
_SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(A__ )
return (out_vocab_file,)
| 0 | 1 |
'''simple docstring'''
import random
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = a[left_index]
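    # partition around the left-most element: values smaller than the pivot are swapped before index i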
_SCREAMING_SNAKE_CASE = left_index + 1
    for j in range(left_index + 1 , right_index ):
if a[j] < pivot:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = a[i], a[j]
i += 1
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = a[i - 1], a[left_index]
return i - 1
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
"""simple docstring"""
if left < right:
        _SCREAMING_SNAKE_CASE = random.randint(left , right - 1 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
        _SCREAMING_SNAKE_CASE = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index ) # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right ) # recursive quicksort to the right of the pivot point
def lowerCAmelCase_ ( ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = input("""Enter numbers separated by a comma:\n""" ).strip()
    _SCREAMING_SNAKE_CASE = [int(item ) for item in user_input.split(""",""" )]
quick_sort_random(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) )
print(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 0 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _a (_lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MobileBertTokenizer
SCREAMING_SNAKE_CASE = MobileBertTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = filter_non_english
SCREAMING_SNAKE_CASE = 'google/mobilebert-uncased'
def UpperCamelCase ( self ) -> Any:
super().setUp()
_SCREAMING_SNAKE_CASE = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def UpperCamelCase ( self , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE = """unwanted, running"""
return input_text, output_text
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(A__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [9, 6, 7, 12, 10, 11] )
def UpperCamelCase ( self ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
# With lower casing
_SCREAMING_SNAKE_CASE = self.get_tokenizer(do_lower_case=A__ )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer(do_lower_case=A__ )
_SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
_SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(A__ ):
_SCREAMING_SNAKE_CASE = i
_SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=A__ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def UpperCamelCase ( self ) -> str:
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def UpperCamelCase ( self ) -> Union[str, Any]:
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def UpperCamelCase ( self ) -> Dict:
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A__ )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A__ , A__ )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def UpperCamelCase ( self ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
_SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(
A__ , return_attention_mask=A__ , return_token_type_ids=A__ , return_offsets_mapping=A__ , add_special_tokens=A__ , )
_SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(A__ , """do_lower_case""" ) else False
_SCREAMING_SNAKE_CASE = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = ["""的""", """人""", """有"""]
_SCREAMING_SNAKE_CASE = """""".join(A__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(A__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A__ , A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(A__ )
# it is expected that only the first Chinese character is not preceded by "##".
_SCREAMING_SNAKE_CASE = [
F"##{token}" if idx != 0 else token for idx, token in enumerate(A__ )
]
self.assertListEqual(A__ , A__ )
self.assertListEqual(A__ , A__ )
| 0 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : List[Any] = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'timesformer'
def __init__( self , A__=2_24 , A__=16 , A__=3 , A__=8 , A__=7_68 , A__=12 , A__=12 , A__=30_72 , A__="gelu" , A__=0.0 , A__=0.0 , A__=0.02 , A__=1E-6 , A__=True , A__="divided_space_time" , A__=0 , **A__ , ) -> Optional[Any]:
super().__init__(**A__ )
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = patch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = num_frames
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = qkv_bias
_SCREAMING_SNAKE_CASE = attention_type
_SCREAMING_SNAKE_CASE = drop_path_rate
| 0 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
UpperCamelCase__ : Tuple = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , *A__ , A__=None , A__=None , A__=None , **A__ ) -> Optional[int]:
super().__init__(*A__ , **A__ )
_SCREAMING_SNAKE_CASE = eval_examples
_SCREAMING_SNAKE_CASE = post_process_function
_SCREAMING_SNAKE_CASE = quant_trainer_args
_SCREAMING_SNAKE_CASE = 1_28 # default number of calibration samples
def UpperCamelCase ( self , A__=None ) -> Union[str, Any]:
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("""Trainer: calibration requires an calib_dataset.""" )
_SCREAMING_SNAKE_CASE = calib_dataset if calib_dataset is not None else self.calib_dataset
        _SCREAMING_SNAKE_CASE = self._remove_unused_columns(calib_dataset , description="""Calibration""" )
return DataLoader(
A__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=A__ , )
def UpperCamelCase ( self , A__=None ) -> str:
_SCREAMING_SNAKE_CASE = self.train_dataset if calib_dataset is None else calib_dataset
_SCREAMING_SNAKE_CASE = self.get_calib_dataloader(A__ )
_SCREAMING_SNAKE_CASE = self.model
        quant_trainer.configure_model(model , self.quant_trainer_args , calib=A__ )
        model.eval()
        quant_trainer.enable_calibration(model )
logger.info("""***** Running calibration *****""" )
logger.info(F" Num examples = {self.calib_num}" )
logger.info(F" Batch size = {calib_dataloader.batch_size}" )
        for step, inputs in enumerate(calib_dataloader ):
            # Prediction step
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.prediction_step(model , inputs , prediction_loss_only=A__ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
        quant_trainer.finish_calibration(model , self.quant_trainer_args )
_SCREAMING_SNAKE_CASE = model
def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__ = "eval" ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.eval_dataset if eval_dataset is None else eval_dataset
_SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ )
_SCREAMING_SNAKE_CASE = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_SCREAMING_SNAKE_CASE = self.compute_metrics
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_SCREAMING_SNAKE_CASE = eval_loop(
A__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , )
finally:
_SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions )
_SCREAMING_SNAKE_CASE = self.compute_metrics(A__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
_SCREAMING_SNAKE_CASE = metrics.pop(A__ )
self.log(A__ )
else:
_SCREAMING_SNAKE_CASE = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_SCREAMING_SNAKE_CASE = self.callback_handler.on_evaluate(self.args , self.state , self.control , A__ )
return metrics
def UpperCamelCase ( self , A__ , A__ , A__=None , A__ = "test" ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.get_test_dataloader(A__ )
# Temporarily disable metric computation, we will do it in the loop here.
_SCREAMING_SNAKE_CASE = self.compute_metrics
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_SCREAMING_SNAKE_CASE = eval_loop(
A__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , )
finally:
_SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions , """predict""" )
_SCREAMING_SNAKE_CASE = self.compute_metrics(A__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
_SCREAMING_SNAKE_CASE = metrics.pop(A__ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A__ )
def UpperCamelCase ( self , A__="./" ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.eval_dataset
_SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ )
_SCREAMING_SNAKE_CASE = next(iter(A__ ) )
        # save the device so the model and the example inputs end up on the same one
_SCREAMING_SNAKE_CASE = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
# convert to tuple
_SCREAMING_SNAKE_CASE = tuple(v.to(A__ ) for k, v in batch.items() )
logger.info("""Converting model to be onnx compatible""" )
from pytorch_quantization.nn import TensorQuantizer
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = self.model.to(A__ )
model.eval()
model.float()
_SCREAMING_SNAKE_CASE = model.module if hasattr(A__ , """module""" ) else model
quant_trainer.configure_model(A__ , self.quant_trainer_args )
_SCREAMING_SNAKE_CASE = os.path.join(A__ , """model.onnx""" )
logger.info(F"exporting model to {output_model_file}" )
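        # dynamic axes let the exported ONNX graph accept any batch size and sequence length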
_SCREAMING_SNAKE_CASE = {0: """batch_size""", 1: """seq_len"""}
torch.onnx.export(
A__ , A__ , A__ , export_params=A__ , opset_version=13 , do_constant_folding=A__ , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=A__ , )
logger.info("""onnx export finished""" )
| 0 | 1 |
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
UpperCamelCase__ : int = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
UpperCamelCase__ : str = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def lowerCAmelCase_ ( ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bootstrap_aggregation=SCREAMING_SNAKE_CASE_ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bootstrap_aggregation=SCREAMING_SNAKE_CASE_ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def lowerCAmelCase_ ( ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """rougeLsum"""
    _SCREAMING_SNAKE_CASE = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , newline_sep=True , rouge_keys=[k] )[k]
    _SCREAMING_SNAKE_CASE = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , newline_sep=False , rouge_keys=[k] )[k]
assert score > score_no_sep
def lowerCAmelCase_ ( ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""rouge1""", """rouge2""", """rougeL"""]
    _SCREAMING_SNAKE_CASE = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , newline_sep=True , rouge_keys=SCREAMING_SNAKE_CASE_ )
    _SCREAMING_SNAKE_CASE = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , newline_sep=False , rouge_keys=SCREAMING_SNAKE_CASE_ )
assert score_sep == score_no_sep
def lowerCAmelCase_ ( ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
_SCREAMING_SNAKE_CASE = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , newline_sep=SCREAMING_SNAKE_CASE_ ) == calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , newline_sep=SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
_SCREAMING_SNAKE_CASE = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
_SCREAMING_SNAKE_CASE = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rouge_keys=["""rougeLsum"""] , newline_sep=SCREAMING_SNAKE_CASE_ )["""rougeLsum"""]
_SCREAMING_SNAKE_CASE = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def lowerCAmelCase_ ( ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
_SCREAMING_SNAKE_CASE = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=SCREAMING_SNAKE_CASE_ )
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 0 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
return "".join([hex(SCREAMING_SNAKE_CASE_ )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE_ )] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> bytes:
"""simple docstring"""
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(SCREAMING_SNAKE_CASE_ ) % 2) != 0:
raise ValueError(
"""Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE_ ) <= set("""0123456789ABCDEF""" ):
raise ValueError(
"""Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 1 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
UpperCamelCase__ : Dict = datasets.logging.get_logger(__name__)
UpperCamelCase__ : Dict = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
UpperCamelCase__ : List[Any] = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
UpperCamelCase__ : Optional[int] = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _a (datasets.Metric):
"""simple docstring"""
def UpperCamelCase ( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
def UpperCamelCase ( self , A__ ) -> Optional[Any]:
if self.config_name == "default":
_SCREAMING_SNAKE_CASE = comet.load_from_checkpoint(comet.download_model("""wmt20-comet-da""" ) )
else:
_SCREAMING_SNAKE_CASE = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__=None , A__=False ) -> Tuple:
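        # gpus=None falls back to a single GPU when CUDA is available, otherwise CPU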
if gpus is None:
_SCREAMING_SNAKE_CASE = 1 if torch.cuda.is_available() else 0
_SCREAMING_SNAKE_CASE = {"""src""": sources, """mt""": predictions, """ref""": references}
        _SCREAMING_SNAKE_CASE = [dict(zip(data , t ) ) for t in zip(*data.values() )]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.scorer.predict(A__ , gpus=A__ , progress_bar=A__ )
return {"mean_score": mean_score, "scores": scores}
| 0 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def lowerCAmelCase_ ( ) -> List[Any]:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def lowerCAmelCase_ ( ) -> int:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def lowerCAmelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
http_head("""https://huggingface.co""" )
| 0 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> bool:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 0
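    # integer used as a bitmap of character code points seen so far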
for ch in input_str:
        _SCREAMING_SNAKE_CASE = ord(ch )
        _SCREAMING_SNAKE_CASE = pow(2 , ch_unicode )
        # If the bit for the current character's code point is already set
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCAmelCase_ ( ) -> Iterator[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 2
while True:
if is_prime(SCREAMING_SNAKE_CASE_ ):
yield num
num += 1
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = 2_00_00_00 ) -> int:
"""simple docstring"""
    return sum(takewhile(lambda x : x < n , prime_generator() ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 0 | 1 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = XCLIPTextConfig()
# derive patch size from model name
_SCREAMING_SNAKE_CASE = model_name.find("""patch""" )
_SCREAMING_SNAKE_CASE = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
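    # e.g. "xclip-base-patch32" yields a patch size of 32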
_SCREAMING_SNAKE_CASE = XCLIPVisionConfig(patch_size=SCREAMING_SNAKE_CASE_ , num_frames=SCREAMING_SNAKE_CASE_ )
if "large" in model_name:
_SCREAMING_SNAKE_CASE = 7_68
_SCREAMING_SNAKE_CASE = 30_72
_SCREAMING_SNAKE_CASE = 12
_SCREAMING_SNAKE_CASE = 10_24
_SCREAMING_SNAKE_CASE = 40_96
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 24
_SCREAMING_SNAKE_CASE = 7_68
_SCREAMING_SNAKE_CASE = 30_72
if model_name == "xclip-large-patch14-16-frames":
_SCREAMING_SNAKE_CASE = 3_36
_SCREAMING_SNAKE_CASE = XCLIPConfig.from_text_vision_configs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if "large" in model_name:
_SCREAMING_SNAKE_CASE = 7_68
return config
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
# text encoder
if name == "token_embedding.weight":
_SCREAMING_SNAKE_CASE = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
_SCREAMING_SNAKE_CASE = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
_SCREAMING_SNAKE_CASE = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
_SCREAMING_SNAKE_CASE = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
_SCREAMING_SNAKE_CASE = name.replace("""c_proj""" , """fc2""" )
if name.startswith("""transformer.resblocks""" ):
_SCREAMING_SNAKE_CASE = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
_SCREAMING_SNAKE_CASE = name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
if "ln_final" in name:
_SCREAMING_SNAKE_CASE = name.replace("""ln_final""" , """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
_SCREAMING_SNAKE_CASE = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
if "visual.conv1" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
if "visual.proj" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.proj""" , """visual_projection.weight""" )
if "text_projection" in name:
_SCREAMING_SNAKE_CASE = name.replace("""text_projection""" , """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
_SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
_SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""positional""" , """position""" )
if name.startswith("""mit.resblocks""" ):
_SCREAMING_SNAKE_CASE = name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
_SCREAMING_SNAKE_CASE = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
return name
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
        _SCREAMING_SNAKE_CASE = orig_state_dict.pop(key )
if "attn.in_proj" in key:
_SCREAMING_SNAKE_CASE = key.split(""".""" )
if key.startswith("""visual""" ):
_SCREAMING_SNAKE_CASE = key_split[3]
_SCREAMING_SNAKE_CASE = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[
:dim, :
]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2, :
]
_SCREAMING_SNAKE_CASE = val[
-dim:, :
]
else:
_SCREAMING_SNAKE_CASE = val[
:dim
]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2
]
_SCREAMING_SNAKE_CASE = val[
-dim:
]
else:
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[
:dim, :
]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2, :
]
_SCREAMING_SNAKE_CASE = val[
-dim:, :
]
else:
_SCREAMING_SNAKE_CASE = val[:dim]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2
]
_SCREAMING_SNAKE_CASE = val[-dim:]
elif key.startswith("""mit""" ):
_SCREAMING_SNAKE_CASE = key_split[2]
_SCREAMING_SNAKE_CASE = config.vision_config.mit_hidden_size
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[:dim, :]
_SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
_SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
_SCREAMING_SNAKE_CASE = val[:dim]
_SCREAMING_SNAKE_CASE = val[dim : dim * 2]
_SCREAMING_SNAKE_CASE = val[-dim:]
else:
_SCREAMING_SNAKE_CASE = key_split[2]
_SCREAMING_SNAKE_CASE = config.text_config.hidden_size
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[:dim, :]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2, :
]
_SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
_SCREAMING_SNAKE_CASE = val[:dim]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2
]
_SCREAMING_SNAKE_CASE = val[-dim:]
else:
_SCREAMING_SNAKE_CASE = rename_key(SCREAMING_SNAKE_CASE_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
_SCREAMING_SNAKE_CASE = val.T
_SCREAMING_SNAKE_CASE = val
return orig_state_dict
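# Illustration (not part of the original script): a minimal, runnable sketch of the
# fused-QKV split performed above. It assumes the fused ``attn.in_proj_weight`` tensor
# is laid out as [query; key; value] along dim 0, which is what the slicing relies on.
def _demo_qkv_split(dim=4):
    import torch

    fused_weight = torch.randn(3 * dim, dim)  # stand-in for attn.in_proj_weight
    q_w = fused_weight[:dim, :]  # first dim rows -> query projection
    k_w = fused_weight[dim : dim * 2, :]  # middle dim rows -> key projection
    v_w = fused_weight[-dim:, :]  # last dim rows -> value projection
    assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)
    return q_w, k_w, v_w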
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
if num_frames == 8:
_SCREAMING_SNAKE_CASE = """eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
_SCREAMING_SNAKE_CASE = """eating_spaghetti.npy"""
    elif num_frames == 32:
        _SCREAMING_SNAKE_CASE = """eating_spaghetti_32_frames.npy"""
    else:
        raise ValueError(F"num_frames {num_frames} not supported, choose from 8, 16 or 32" )
_SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" , )
_SCREAMING_SNAKE_CASE = np.load(SCREAMING_SNAKE_CASE_ )
return list(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
_SCREAMING_SNAKE_CASE = model_to_url[model_name]
_SCREAMING_SNAKE_CASE = 8
if "16-frames" in model_name:
_SCREAMING_SNAKE_CASE = 16
elif "shot" in model_name:
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = get_xclip_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ )
model.eval()
if "drive" in checkpoint_url:
_SCREAMING_SNAKE_CASE = """pytorch_model.bin"""
gdown.cached_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""model"""]
else:
_SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ )["""model"""]
_SCREAMING_SNAKE_CASE = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
_SCREAMING_SNAKE_CASE = 3_36 if model_name == """xclip-large-patch14-16-frames""" else 2_24
_SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(size=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
_SCREAMING_SNAKE_CASE = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
_SCREAMING_SNAKE_CASE = XCLIPProcessor(image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = prepare_video(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE_ )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
# Verify outputs
_SCREAMING_SNAKE_CASE = outputs.logits_per_video
_SCREAMING_SNAKE_CASE = logits_per_video.softmax(dim=1 )
print("""Probs:""" , SCREAMING_SNAKE_CASE_ )
# kinetics-400
if model_name == "xclip-base-patch32":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F"Model name {model_name} not supported" )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
slow_tokenizer.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase__ : str = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
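# Hypothetical invocation of this conversion script (the script filename and output path
# below are assumptions for illustration; the model name must be a key of model_to_url):
#
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32-converted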
| 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _a (unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 1_28, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 1_42, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
_SCREAMING_SNAKE_CASE = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 1_28,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 1_42,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(A__ ) , A__ )
def UpperCamelCase ( self ) -> int:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(A__ ) , x.transpose() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(transpose(A__ ) , np.asarray(transpose(A__ ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , np.asarray(transpose(A__ , axes=(1, 2, 0) ) ) ) )
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.reshape(A__ , (4, 3) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.reshape(A__ , (12, 5) ) ) )
@require_torch
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.asarray(reshape(A__ , (4, 3) ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.asarray(reshape(A__ , (12, 5) ) ) ) )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(A__ ) , np.squeeze(A__ ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.squeeze(A__ , axis=2 ) ) )
@require_torch
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(squeeze(A__ ) , np.asarray(squeeze(A__ ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.asarray(squeeze(A__ , axis=2 ) ) ) )
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.expand_dims(A__ , axis=1 ) ) )
@require_torch
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A__ )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) )
@require_tf
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A__ )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) )
@require_flax
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A__ )
self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.asarray(expand_dims(A__ , axis=1 ) ) ) )
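# Illustration (not part of the test suite): transpose/reshape/squeeze/expand_dims from
# transformers.utils dispatch on the framework of their input, so the same call works for
# NumPy arrays, PyTorch tensors, TF tensors and JAX arrays. A NumPy-only sketch:
def _demo_framework_agnostic_ops():
    x = np.random.randn(1, 3, 4)
    assert transpose(x).shape == (4, 3, 1)  # no axes given -> reverse all dimensions
    assert reshape(x, (3, 4)).shape == (3, 4)
    assert squeeze(x).shape == (3, 4)  # drops every size-1 axis
    assert expand_dims(x, axis=0).shape == (1, 1, 3, 4)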
| 0 | 1 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
_SCREAMING_SNAKE_CASE = TapasConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
# set absolute/relative position embeddings parameter
_SCREAMING_SNAKE_CASE = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_SCREAMING_SNAKE_CASE = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
elif task == "WTQ":
# run_task_main.py hparams
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = True
# hparam_utils.py hparams
_SCREAMING_SNAKE_CASE = 0.66_4694
_SCREAMING_SNAKE_CASE = 0.20_7951
_SCREAMING_SNAKE_CASE = 0.12_1194
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = 0.035_2513
_SCREAMING_SNAKE_CASE = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = False
# hparam_utils.py hparams
_SCREAMING_SNAKE_CASE = 36.4519
_SCREAMING_SNAKE_CASE = 0.90_3421
_SCREAMING_SNAKE_CASE = 222.088
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = 0.76_3141
_SCREAMING_SNAKE_CASE = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
elif task == "TABFACT":
_SCREAMING_SNAKE_CASE = TapasForSequenceClassification(config=SCREAMING_SNAKE_CASE_ )
elif task == "MLM":
_SCREAMING_SNAKE_CASE = TapasForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
elif task == "INTERMEDIATE_PRETRAINING":
_SCREAMING_SNAKE_CASE = TapasModel(config=SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(F"Task {task} not supported." )
print(F"Building PyTorch model from configuration: {config}" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save pytorch-model (weights and configuration)
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Save tokenizer files
print(F"Save tokenizer files to {pytorch_dump_path}" )
_SCREAMING_SNAKE_CASE = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=5_12 )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
UpperCamelCase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCamelCase__ : Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
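# Hypothetical invocation (script filename and paths are placeholders for illustration):
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path ./tapas_wtq/model.ckpt \
#       --tapas_config_file ./tapas_wtq/config.json \
#       --pytorch_dump_path ./tapas-wtq-converted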
| 0 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ''
SCREAMING_SNAKE_CASE = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , A__ = None , A__ = None , **A__ , ) -> Optional[int]:
super().__init__(self , **A__ )
_SCREAMING_SNAKE_CASE = repo_info
_SCREAMING_SNAKE_CASE = token
_SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self ) -> Tuple:
if self.dir_cache is None:
_SCREAMING_SNAKE_CASE = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_SCREAMING_SNAKE_CASE = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(A__ ): {"""name""": str(A__ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def UpperCamelCase ( self , A__ , A__ = "rb" , **A__ , ) -> Optional[int]:
if not isinstance(self.repo_info , A__ ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
_SCREAMING_SNAKE_CASE = hf_hub_url(self.repo_info.id , A__ , revision=self.repo_info.sha )
return fsspec.open(
A__ , mode=A__ , headers=get_authentication_headers_for_url(A__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def UpperCamelCase ( self , A__ , **A__ ) -> str:
self._get_dirs()
_SCREAMING_SNAKE_CASE = self._strip_protocol(A__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(A__ )
def UpperCamelCase ( self , A__ , A__=False , **A__ ) -> List[Any]:
self._get_dirs()
_SCREAMING_SNAKE_CASE = PurePosixPath(path.strip("""/""" ) )
_SCREAMING_SNAKE_CASE = {}
for p, f in self.dir_cache.items():
_SCREAMING_SNAKE_CASE = PurePosixPath(p.strip("""/""" ) )
_SCREAMING_SNAKE_CASE = p.parent
if root == path:
_SCREAMING_SNAKE_CASE = f
_SCREAMING_SNAKE_CASE = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
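# Usage note (a sketch of the upstream API this class mirrors; the method names above were
# renamed by the dataset obfuscation, so this is illustrative only). Upstream this is the
# legacy ``HfFileSystem`` in ``datasets``, used roughly like:
#
#   from huggingface_hub import HfApi
#   repo_info = HfApi().dataset_info("squad")   # hypothetical repo id
#   fs = HfFileSystem(repo_info=repo_info)
#   fs.ls("")                                   # list top-level repo entries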
| 0 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
"""simple docstring"""
if not head:
return True
    # split the list into two parts
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = head.next, head
while fast and fast.next:
_SCREAMING_SNAKE_CASE = fast.next.next
_SCREAMING_SNAKE_CASE = slow.next
_SCREAMING_SNAKE_CASE = slow.next
    _SCREAMING_SNAKE_CASE = None  # detach the first half; the check still passes without this, but it keeps the halves separate
# reverse the second part
_SCREAMING_SNAKE_CASE = None
while second:
_SCREAMING_SNAKE_CASE = second.next
_SCREAMING_SNAKE_CASE = node
_SCREAMING_SNAKE_CASE = second
_SCREAMING_SNAKE_CASE = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
_SCREAMING_SNAKE_CASE = node.next
_SCREAMING_SNAKE_CASE = head.next
return True
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
_SCREAMING_SNAKE_CASE = _SCREAMING_SNAKE_CASE = _SCREAMING_SNAKE_CASE = head
while fast and fast.next:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = fast.next.next, slow.next
# 2. Push the second half into the stack
_SCREAMING_SNAKE_CASE = [slow.val]
while slow.next:
_SCREAMING_SNAKE_CASE = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
_SCREAMING_SNAKE_CASE = cur.next
return True
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
if not head or not head.next:
return True
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = 0
while head:
if head.val in d:
d[head.val].append(SCREAMING_SNAKE_CASE_ )
else:
_SCREAMING_SNAKE_CASE = [pos]
_SCREAMING_SNAKE_CASE = head.next
pos += 1
_SCREAMING_SNAKE_CASE = pos - 1
_SCREAMING_SNAKE_CASE = 0
for v in d.values():
if len(SCREAMING_SNAKE_CASE_ ) % 2 != 0:
middle += 1
else:
_SCREAMING_SNAKE_CASE = 0
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
if v[i] + v[len(SCREAMING_SNAKE_CASE_ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
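# Illustration (not in the original file): a tiny node type plus a check. The original
# relies only on ``.val`` / ``.next`` attributes and never defines a node class. Because
# all three variants above share the obfuscated name ``lowerCAmelCase_``, only the last
# (dictionary-based) one is actually callable here.
class _DemoNode:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt


def _demo_palindrome_checks():
    palindrome = _DemoNode(1, _DemoNode(2, _DemoNode(2, _DemoNode(1))))
    assert lowerCAmelCase_(palindrome) is True
    not_palindrome = _DemoNode(1, _DemoNode(2, _DemoNode(3)))
    assert lowerCAmelCase_(not_palindrome) is False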
| 0 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
_SCREAMING_SNAKE_CASE = (
Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , split=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
if issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = parquet_path
elif issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = [parquet_path]
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=("train",) ) -> List[str]:
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for split in splits:
_SCREAMING_SNAKE_CASE = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
_SCREAMING_SNAKE_CASE = (
Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_SCREAMING_SNAKE_CASE = ParquetDatasetReader({"""train""": parquet_path} , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
if split:
_SCREAMING_SNAKE_CASE = {split: parquet_path}
else:
_SCREAMING_SNAKE_CASE = """train"""
_SCREAMING_SNAKE_CASE = {"""train""": parquet_path, """test""": parquet_path}
_SCREAMING_SNAKE_CASE = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_SCREAMING_SNAKE_CASE = pq.ParquetFile(tmp_path / """foo.parquet""" )
_SCREAMING_SNAKE_CASE = pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = str(shared_datadir / """test_image_rgb.jpg""" )
_SCREAMING_SNAKE_CASE = {"""image""": [image_path]}
_SCREAMING_SNAKE_CASE = Features({"""image""": Image()} )
_SCREAMING_SNAKE_CASE = Dataset.from_dict(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_SCREAMING_SNAKE_CASE = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_SCREAMING_SNAKE_CASE = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=SCREAMING_SNAKE_CASE_ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
assert get_writer_batch_size(SCREAMING_SNAKE_CASE_ ) == expected
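# Illustration (not part of the test suite): a minimal reader/writer round trip using the
# same classes exercised above (the file name is a placeholder):
def _demo_parquet_roundtrip(tmp_path):
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    assert ParquetDatasetWriter(ds, tmp_path / "demo.parquet").write() > 0
    reloaded = ParquetDatasetReader(str(tmp_path / "demo.parquet")).read()
    assert reloaded.column_names == ["col_1", "col_2"]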
| 0 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : int = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'vivit'
def __init__( self , A__=2_24 , A__=32 , A__=[2, 16, 16] , A__=3 , A__=7_68 , A__=12 , A__=12 , A__=30_72 , A__="gelu_fast" , A__=0.0 , A__=0.0 , A__=0.02 , A__=1E-06 , A__=True , **A__ , ) -> Any:
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = num_frames
_SCREAMING_SNAKE_CASE = tubelet_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = qkv_bias
super().__init__(**A__ )
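# Usage note (a sketch of the upstream API this config mirrors; the class name and
# parameter names above are dataset-obfuscated, so this is illustrative only):
#
#   from transformers import VivitConfig, VivitModel
#   config = VivitConfig(num_frames=32, image_size=224, tubelet_size=[2, 16, 16])
#   model = VivitModel(config)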
| 0 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError("""multiplicative_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""multiplicative_persistence() does not accept negative values""" )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
while len(SCREAMING_SNAKE_CASE_ ) != 1:
_SCREAMING_SNAKE_CASE = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string]
_SCREAMING_SNAKE_CASE = 1
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
total *= numbers[i]
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
steps += 1
return steps
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError("""additive_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""additive_persistence() does not accept negative values""" )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
while len(SCREAMING_SNAKE_CASE_ ) != 1:
_SCREAMING_SNAKE_CASE = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string]
_SCREAMING_SNAKE_CASE = 0
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
total += numbers[i]
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
steps += 1
return steps
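# Illustration (not in the original file): worked examples. Multiplicatively,
# 39 -> 27 -> 14 -> 4 takes three steps; additively, 199 -> 19 -> 10 -> 1 also takes
# three. Because both functions above share the obfuscated name ``lowerCAmelCase_``,
# only the additive variant is directly callable here.
def _demo_persistence():
    assert lowerCAmelCase_(199) == 3
    assert lowerCAmelCase_(4) == 0  # already a single digit: zero steps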
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 1 |
'''simple docstring'''
from itertools import permutations
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> bool:
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
_SCREAMING_SNAKE_CASE = [7, 11, 13, 17]
for i, test in enumerate(SCREAMING_SNAKE_CASE_ ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = 10 ) -> int:
"""simple docstring"""
return sum(
int("""""".join(map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
for num in permutations(range(SCREAMING_SNAKE_CASE_ ) )
if is_substring_divisible(SCREAMING_SNAKE_CASE_ ) )
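# Illustration (not in the original file): re-derive the property for 1406357289, the
# example given in the Project Euler #43 statement.
def _demo_substring_divisibility():
    num = tuple(int(c) for c in "1406357289")
    for offset, divisor in enumerate((2, 3, 5, 7, 11, 13, 17)):
        window = num[offset + 1] * 1_00 + num[offset + 2] * 10 + num[offset + 3]
        assert window % divisor == 0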
if __name__ == "__main__":
print(f"""{solution() = }""")
| 0 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
UpperCamelCase__ : Tuple = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase__ : Union[str, Any] = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase__ : str = "sshleifer/tiny-mbart"
@require_torch
class _a (_lowerCamelCase):
"""simple docstring"""
def UpperCamelCase ( self , A__=False , A__=None , A__=True , A__=True , A__=True , A__=True , ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=A__ , num_train_epochs=1 , distributed=A__ , extra_args_str=A__ , predict_with_generate=A__ , do_train=A__ , do_eval=A__ , do_predict=A__ , )
_SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history
if not do_eval:
return
_SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()]
_SCREAMING_SNAKE_CASE = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
_SCREAMING_SNAKE_CASE = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , A__ )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def UpperCamelCase ( self ) -> Optional[int]:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def UpperCamelCase ( self ) -> Optional[Any]:
self.run_seqaseq_quick(distributed=A__ )
@require_torch_multi_gpu
def UpperCamelCase ( self ) -> Union[str, Any]:
self.run_seqaseq_quick(distributed=A__ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> Any:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> Tuple:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> str:
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=A__ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self ) -> List[str]:
self.run_seqaseq_quick(
distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=A__ )
@require_apex
@require_torch_gpu
def UpperCamelCase ( self ) -> Optional[Any]:
        # XXX: apex breaks the trainer if it's run twice, e.g. run_seq2seq.main() from the same
        # program, and it breaks other tests that run from the same pytest worker. Therefore, until
        # this is sorted out, it must be run only in an external program, that is distributed=True in
        # this test, and only under one or more gpus - if we want cpu we will need a special test.
        #
        # Specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via
        # a 2nd main() call, it botches the subsequent eval.
#
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" )
        # test a 2nd time - this previously produced eval_loss: nan
        # (to reproduce the problem, set distributed=False)
self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def UpperCamelCase ( self , A__ ) -> List[Any]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
_SCREAMING_SNAKE_CASE = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
_SCREAMING_SNAKE_CASE = experiments[experiment_id]
_SCREAMING_SNAKE_CASE = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
_SCREAMING_SNAKE_CASE = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**A__ , extra_args_str=data["""extra_args_str"""] )
_SCREAMING_SNAKE_CASE = len(re.findall(A__ , cl.err ) )
self.assertEqual(A__ , data["""n_matches"""] )
@slow
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=10 , distributed=A__ , )
# Check metrics
_SCREAMING_SNAKE_CASE = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history
_SCREAMING_SNAKE_CASE = [log for log in logs if """eval_loss""" in log.keys()]
_SCREAMING_SNAKE_CASE = eval_metrics[0]
_SCREAMING_SNAKE_CASE = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , A__ )
# test if do_predict saves generations and metrics
_SCREAMING_SNAKE_CASE = os.listdir(A__ )
_SCREAMING_SNAKE_CASE = {os.path.basename(A__ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def UpperCamelCase ( self ) -> Dict:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(A__ ) -> Tuple[int, float]:
_SCREAMING_SNAKE_CASE = """--skip_memory_metrics 0"""
_SCREAMING_SNAKE_CASE = self.run_trainer(
max_len=1_28 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=1 , optim=A__ , distributed=A__ , extra_args_str=A__ , do_eval=A__ , do_predict=A__ , n_gpus_to_use=1 , )
# Check metrics
_SCREAMING_SNAKE_CASE = TrainerState.load_from_json(Path(A__ , """trainer_state.json""" ) ).log_history
_SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
_SCREAMING_SNAKE_CASE = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
_SCREAMING_SNAKE_CASE = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
_SCREAMING_SNAKE_CASE = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
_SCREAMING_SNAKE_CASE = gpu_peak_mem_orig + gpu_alloc_mem_orig
_SCREAMING_SNAKE_CASE = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
_SCREAMING_SNAKE_CASE = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding` weights,
        # which don't get quantized and remain in fp32. Therefore we only have 25M parameters quantized
        # to 2 bytes, and the diff in optimizer memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between gpus, let's check
# that we have at least 120MB in savings
_SCREAMING_SNAKE_CASE = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
A__ , A__ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
A__ , A__ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
A__ , A__ , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ = 3E-3 , A__ = "adafactor" , A__ = False , A__ = None , A__ = 0 , A__ = True , A__ = True , A__ = True , A__ = True , A__ = None , ) -> Dict:
_SCREAMING_SNAKE_CASE = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
_SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE = F"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A__ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(A__ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
_SCREAMING_SNAKE_CASE = F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A__ )}\n ".split()
_SCREAMING_SNAKE_CASE = """
--do_predict
""".split()
_SCREAMING_SNAKE_CASE = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
_SCREAMING_SNAKE_CASE = get_gpu_count()
_SCREAMING_SNAKE_CASE = get_torch_dist_unique_port()
_SCREAMING_SNAKE_CASE = F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
_SCREAMING_SNAKE_CASE = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(A__ , env=self.get_env() )
else:
_SCREAMING_SNAKE_CASE = ["""run_translation.py"""] + args
with patch.object(A__ , """argv""" , A__ ):
main()
return output_dir
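# Usage note (illustrative, not part of the test suite): the command assembled by
# run_trainer above boils down to something like
#
#   python examples/pytorch/translation/run_translation.py \
#       --model_name_or_path sshleifer/student_marian_en_ro_6_1 \
#       --source_lang en_XX --target_lang ro_RO \
#       --train_file train.json --validation_file val.json \
#       --output_dir /tmp/seq2seq_test --do_train --do_eval --predict_with_generate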
| 0 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
if "model" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""model.""" , """""" )
if "norm1" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" )
if "norm2" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""norm2""" , """output.LayerNorm""" )
if "norm" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""norm""" , """LayerNorm""" )
if "transformer" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.split(""".""" )[0].split("""_""" )[-1]
_SCREAMING_SNAKE_CASE = orig_key.replace(F"transformer_{layer_num}" , F"encoder.layer.{layer_num}" )
if "mha.attn" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""mha.attn""" , """attention.self""" )
if "mha" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""mha""" , """attention""" )
if "W_q" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""W_q""" , """self.query""" )
if "W_k" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""W_k""" , """self.key""" )
if "W_v" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""W_v""" , """self.value""" )
if "ff1" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""ff1""" , """intermediate.dense""" )
if "ff2" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""ff2""" , """output.dense""" )
if "ff" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""ff""" , """output.dense""" )
if "mlm_class" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" )
if "mlm" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""mlm""" , """cls.predictions.transform""" )
if "cls" not in orig_key:
_SCREAMING_SNAKE_CASE = """yoso.""" + orig_key
return orig_key
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_SCREAMING_SNAKE_CASE = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
_SCREAMING_SNAKE_CASE = val
_SCREAMING_SNAKE_CASE = orig_state_dict["""cls.predictions.decoder.bias"""]
_SCREAMING_SNAKE_CASE = torch.arange(SCREAMING_SNAKE_CASE_ ).expand((1, -1) ) + 2
return orig_state_dict
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""model_state_dict"""]
_SCREAMING_SNAKE_CASE = YosoConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = YosoForMaskedLM(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = convert_checkpoint_helper(config.max_position_embeddings , SCREAMING_SNAKE_CASE_ )
print(model.load_state_dict(SCREAMING_SNAKE_CASE_ ) )
model.eval()
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}" )
if __name__ == "__main__":
UpperCamelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCamelCase__ : int = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
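# Hypothetical invocation (script filename and paths are placeholders for illustration):
#
#   python convert_yoso_checkpoint_to_pytorch.py \
#       --pytorch_model_path ./yoso.ckpt \
#       --config_file ./yoso_config.json \
#       --pytorch_dump_path ./yoso-converted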
| 0 |
'''simple docstring'''
import sys
UpperCamelCase__ : int = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ = N ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = -sys.maxsize - 1
for i in range(len(SCREAMING_SNAKE_CASE_ ) - 12 ):
_SCREAMING_SNAKE_CASE = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
_SCREAMING_SNAKE_CASE = product
return largest_product
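# Illustration (not in the original file): an equivalent explicit sliding-window version
# of the brute force above; for the 1000-digit string UpperCamelCase__ this evaluates to
# 23514624000, the published answer to Project Euler #8.
def _solution_windowed(n: str = UpperCamelCase__, width: int = 13) -> int:
    best = 0
    for i in range(len(n) - width + 1):
        product = 1
        for digit in n[i : i + width]:
            product *= int(digit)
        best = max(best, product)
    return best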
if __name__ == "__main__":
print(f"""{solution() = }""")
| 0 | 1 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
UpperCamelCase__ : Any = logging.getLogger(__name__)
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , A__=-1 ) -> str:
        # in NER datasets, the last column is usually reserved for the NER label
_SCREAMING_SNAKE_CASE = label_idx
def UpperCamelCase ( self , A__ , A__ ) -> List[InputExample]:
if isinstance(A__ , A__ ):
_SCREAMING_SNAKE_CASE = mode.value
_SCREAMING_SNAKE_CASE = os.path.join(A__ , F"{mode}.txt" )
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = []
with open(A__ , encoding="""utf-8""" ) as f:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=A__ , labels=A__ ) )
guid_index += 1
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
else:
_SCREAMING_SNAKE_CASE = line.split(""" """ )
words.append(splits[0] )
if len(A__ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=A__ , labels=A__ ) )
return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    """Reads CoNLL-style chunking data, where the chunk label is second-to-last."""

    def __init__(self):
        # In the CoNLL-2003 dataset, the chunk column is second-to-last.
        super().__init__(label_idx=-2)
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    """Reads part-of-speech data in CoNLL-U format via ``conllu.parse_incr``."""

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
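# A minimal usage sketch (assumed file layout "<data_dir>/train.txt"; not part
# of the original module):
#
#   task = NER()
#   examples = task.read_examples_from_file("/path/to/data", Split.train)
#   labels = task.get_labels(path="")  # empty path falls back to the CoNLL-2003 NER tags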
| 0 |
'''Baconian cipher: map letters and spaces to five-character A/B groups and back.'''
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
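# Note: all 26 codes above are distinct (unlike the classical Baconian cipher,
# where i/j and u/v share codes), so the inverse table below is well-defined.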
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encode ``word`` (letters of the alphabet and spaces only) with the Baconian cipher."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    """Decode a Baconian string; code words are separated by single spaces."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        # Consume the code word five characters at a time.
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
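# Example round-trip (a sketch; values worked out by hand from encode_dict):
#
#   encode("hello world")
#   == "AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB"
#   decode("AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB")
#   == "hello world"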
if __name__ == "__main__":
from doctest import testmod
testmod()
| 0 | 1 |