code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
snake_case_ = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
snake_case_ = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(SCREAMING_SNAKE_CASE_ , 1 ):
if n < _p:
# then we have our last prime to check
snake_case_ = primes[:idx]
break
snake_case_ , snake_case_ = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
snake_case_ = False
for r in range(SCREAMING_SNAKE_CASE_ ):
snake_case_ = pow(SCREAMING_SNAKE_CASE_ , d * 2**r , SCREAMING_SNAKE_CASE_ )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
snake_case_ = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def _a ( ) -> None:
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 700 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case__)
class __A (snake_case__):
'''simple docstring'''
__lowercase: str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
__lowercase: ClassVar[Features] = Features({"""audio""": Audio()})
__lowercase: ClassVar[Features] = Features({"""transcription""": Value("""string""")})
__lowercase: str = "audio"
__lowercase: str = "transcription"
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Any ) ->int:
"""simple docstring"""
if self.audio_column not in features:
raise ValueError(F"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , UpperCAmelCase_ ):
raise ValueError(F"""Column {self.audio_column} is not an Audio type.""" )
snake_case_ = copy.deepcopy(self )
snake_case_ = self.input_schema.copy()
snake_case_ = features[self.audio_column]
snake_case_ = input_schema
return task_template
@property
def lowerCAmelCase ( self : List[str] ) ->Dict[str, str]:
"""simple docstring"""
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 2 | 0 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __A (_snake_case):
'''simple docstring'''
__lowercase: List[str] = """"""
__lowercase: List[Any] = """hf-legacy""" # "hf://"" is reserved for hffs
def __init__( self : Optional[int] , UpperCAmelCase_ : Optional[DatasetInfo] = None , UpperCAmelCase_ : Optional[str] = None , **UpperCAmelCase_ : Union[str, Any] , ) ->Optional[Any]:
"""simple docstring"""
super().__init__(self , **lowerCAmelCase__ )
snake_case_ = repo_info
snake_case_ = token
snake_case_ = None
def lowerCAmelCase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
if self.dir_cache is None:
snake_case_ = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
snake_case_ = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(lowerCAmelCase__ ): {"""name""": str(lowerCAmelCase__ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str = "rb" , **UpperCAmelCase_ : List[str] , ) ->Dict:
"""simple docstring"""
if not isinstance(self.repo_info , lowerCAmelCase__ ):
raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
snake_case_ = hf_hub_url(self.repo_info.id , lowerCAmelCase__ , revision=self.repo_info.sha )
return fsspec.open(
lowerCAmelCase__ , mode=lowerCAmelCase__ , headers=get_authentication_headers_for_url(lowerCAmelCase__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->Optional[int]:
"""simple docstring"""
self._get_dirs()
snake_case_ = self._strip_protocol(lowerCAmelCase__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(lowerCAmelCase__ )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : Any ) ->Union[str, Any]:
"""simple docstring"""
self._get_dirs()
snake_case_ = PurePosixPath(path.strip("""/""" ) )
snake_case_ = {}
for p, f in self.dir_cache.items():
snake_case_ = PurePosixPath(p.strip("""/""" ) )
snake_case_ = p.parent
if root == path:
snake_case_ = f
snake_case_ = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 701 |
"""simple docstring"""
from functools import reduce
__SCREAMING_SNAKE_CASE : Tuple = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def _a ( _SCREAMING_SNAKE_CASE = N ) -> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str(int(_SCREAMING_SNAKE_CASE ) * int(_SCREAMING_SNAKE_CASE ) ) , n[i : i + 13] ) )
for i in range(len(_SCREAMING_SNAKE_CASE ) - 12 ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 2 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class __A (_SCREAMING_SNAKE_CASE):
'''simple docstring'''
__lowercase: Optional[int] = "mctct"
def __init__( self : str , UpperCAmelCase_ : Union[str, Any]=8_065 , UpperCAmelCase_ : Dict=1_536 , UpperCAmelCase_ : Tuple=36 , UpperCAmelCase_ : List[str]=6_144 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : int=384 , UpperCAmelCase_ : str=920 , UpperCAmelCase_ : int=1E-5 , UpperCAmelCase_ : int=0.3 , UpperCAmelCase_ : List[str]="relu" , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Tuple=0.3 , UpperCAmelCase_ : Optional[Any]=0.3 , UpperCAmelCase_ : List[str]=1 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : int=1 , UpperCAmelCase_ : Optional[Any]=0.3 , UpperCAmelCase_ : Any=1 , UpperCAmelCase_ : Any=(7,) , UpperCAmelCase_ : List[Any]=(3,) , UpperCAmelCase_ : List[str]=80 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[str]="sum" , UpperCAmelCase_ : Optional[Any]=False , **UpperCAmelCase_ : Any , ) ->int:
"""simple docstring"""
super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = intermediate_size
snake_case_ = num_attention_heads
snake_case_ = attention_head_dim
snake_case_ = max_position_embeddings
snake_case_ = layer_norm_eps
snake_case_ = layerdrop
snake_case_ = hidden_act
snake_case_ = initializer_range
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = pad_token_id
snake_case_ = bos_token_id
snake_case_ = eos_token_id
snake_case_ = conv_glu_dim
snake_case_ = conv_dropout
snake_case_ = num_conv_layers
snake_case_ = input_feat_per_channel
snake_case_ = input_channels
snake_case_ = conv_channels
snake_case_ = ctc_loss_reduction
snake_case_ = ctc_zero_infinity
# prevents config testing fail with exporting to json
snake_case_ = list(A_ )
snake_case_ = list(A_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
F"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
| 702 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class __A (snake_case__):
'''simple docstring'''
__lowercase: Any = """mctct"""
def __init__( self : Dict , UpperCAmelCase_ : List[Any]=8_065 , UpperCAmelCase_ : Tuple=1_536 , UpperCAmelCase_ : Optional[Any]=36 , UpperCAmelCase_ : int=6_144 , UpperCAmelCase_ : Dict=4 , UpperCAmelCase_ : Any=384 , UpperCAmelCase_ : List[str]=920 , UpperCAmelCase_ : Any=1E-5 , UpperCAmelCase_ : Any=0.3 , UpperCAmelCase_ : Tuple="relu" , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : Dict=0.3 , UpperCAmelCase_ : str=0.3 , UpperCAmelCase_ : Any=1 , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=0.3 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Optional[Any]=(7,) , UpperCAmelCase_ : Optional[Any]=(3,) , UpperCAmelCase_ : List[str]=80 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : List[str]="sum" , UpperCAmelCase_ : Union[str, Any]=False , **UpperCAmelCase_ : Any , ) ->Dict:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = intermediate_size
snake_case_ = num_attention_heads
snake_case_ = attention_head_dim
snake_case_ = max_position_embeddings
snake_case_ = layer_norm_eps
snake_case_ = layerdrop
snake_case_ = hidden_act
snake_case_ = initializer_range
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = pad_token_id
snake_case_ = bos_token_id
snake_case_ = eos_token_id
snake_case_ = conv_glu_dim
snake_case_ = conv_dropout
snake_case_ = num_conv_layers
snake_case_ = input_feat_per_channel
snake_case_ = input_channels
snake_case_ = conv_channels
snake_case_ = ctc_loss_reduction
snake_case_ = ctc_zero_infinity
# prevents config testing fail with exporting to json
snake_case_ = list(UpperCAmelCase_ )
snake_case_ = list(UpperCAmelCase_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
F"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
| 2 | 0 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703 |
"""simple docstring"""
from math import factorial
def _a ( _SCREAMING_SNAKE_CASE = 100 ) -> int:
return sum(int(_SCREAMING_SNAKE_CASE ) for x in str(factorial(_SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 2 | 0 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class __A (lowercase__):
'''simple docstring'''
__lowercase: Tuple = """mvp"""
__lowercase: Union[str, Any] = ["""past_key_values"""]
__lowercase: Tuple = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any=50_267 , UpperCAmelCase_ : str=1_024 , UpperCAmelCase_ : Dict=12 , UpperCAmelCase_ : Any=4_096 , UpperCAmelCase_ : Union[str, Any]=16 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : List[Any]=4_096 , UpperCAmelCase_ : List[str]=16 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Optional[int]=1_024 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : str=False , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Dict=1 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : int=100 , UpperCAmelCase_ : Optional[Any]=800 , **UpperCAmelCase_ : List[str] , ) ->List[Any]:
"""simple docstring"""
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = d_model
snake_case_ = encoder_ffn_dim
snake_case_ = encoder_layers
snake_case_ = encoder_attention_heads
snake_case_ = decoder_ffn_dim
snake_case_ = decoder_layers
snake_case_ = decoder_attention_heads
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = activation_function
snake_case_ = init_std
snake_case_ = encoder_layerdrop
snake_case_ = decoder_layerdrop
snake_case_ = classifier_dropout
snake_case_ = use_cache
snake_case_ = encoder_layers
snake_case_ = scale_embedding # scale factor will be sqrt(d_model) if True
snake_case_ = use_prompt
snake_case_ = prompt_length
snake_case_ = prompt_mid_dim
super().__init__(
pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , __lowerCamelCase ):
snake_case_ = self.bos_token_id
warnings.warn(
F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"""The config can simply be saved and uploaded again to be fixed.""" )
| 704 |
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: str = VQModel
__lowercase: Union[str, Any] = """sample"""
@property
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[str]=(32, 32) ) ->Tuple:
"""simple docstring"""
snake_case_ = 4
snake_case_ = 3
snake_case_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
return {"sample": image}
@property
def lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCAmelCase ( self : List[Any] ) ->Any:
"""simple docstring"""
return (3, 32, 32)
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 3,
}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase ( self : List[str] ) ->Dict:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ , snake_case_ = VQModel.from_pretrained("""fusing/vqgan-dummy""" , output_loading_info=UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(UpperCAmelCase_ )
snake_case_ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = VQModel.from_pretrained("""fusing/vqgan-dummy""" )
model.to(UpperCAmelCase_ ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
snake_case_ = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
snake_case_ = image.to(UpperCAmelCase_ )
with torch.no_grad():
snake_case_ = model(UpperCAmelCase_ ).sample
snake_case_ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case_ = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143] )
# fmt: on
self.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
| 2 | 0 |
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__SCREAMING_SNAKE_CASE : Union[str, Any] = concatenate_datasets
__SCREAMING_SNAKE_CASE : Dict = DownloadConfig
__SCREAMING_SNAKE_CASE : Optional[int] = DownloadManager
__SCREAMING_SNAKE_CASE : Tuple = DownloadMode
__SCREAMING_SNAKE_CASE : Union[str, Any] = DownloadConfig
__SCREAMING_SNAKE_CASE : Tuple = DownloadMode
__SCREAMING_SNAKE_CASE : int = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 705 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Dict = KandinskyVaaControlnetPipeline
__lowercase: str = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowercase: List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowercase: Union[str, Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__lowercase: Tuple = False
@property
def lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
return self.time_input_dim
@property
def lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
return 100
@property
def lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case_ = UNetaDConditionModel(**UpperCAmelCase_ )
return model
@property
def lowerCAmelCase ( self : Any ) ->Optional[Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
snake_case_ = self.dummy_unet
snake_case_ = self.dummy_movq
snake_case_ = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=UpperCAmelCase_ , )
snake_case_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any]=0 ) ->List[str]:
"""simple docstring"""
snake_case_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
snake_case_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCAmelCase_ )
# create hint
snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
if str(UpperCAmelCase_ ).startswith("""mps""" ):
snake_case_ = torch.manual_seed(UpperCAmelCase_ )
else:
snake_case_ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
snake_case_ = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
snake_case_ = """cpu"""
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**UpperCAmelCase_ )
snake_case_ = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) )
snake_case_ = output.images
snake_case_ = pipe(
**self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array(
[0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
snake_case_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
snake_case_ = torch.from_numpy(np.array(UpperCAmelCase_ ) ).float() / 255.0
snake_case_ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case_ = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCAmelCase_ )
snake_case_ = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
snake_case_ = pipeline.to(UpperCAmelCase_ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = """A robot, 4k photo"""
snake_case_ = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case_ , snake_case_ = pipe_prior(
UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
snake_case_ = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case_ = pipeline(
image_embeds=UpperCAmelCase_ , negative_image_embeds=UpperCAmelCase_ , hint=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=100 , output_type="""np""" , )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
| 2 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__SCREAMING_SNAKE_CASE : Optional[int] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
__SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 706 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class __A :
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase_ : list[str] ) ->List[Any]:
"""simple docstring"""
snake_case_ = []
self.adlist.append(
{"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} )
for keyword in keywords:
self.add_keyword(UpperCAmelCase_ )
self.set_fail_transitions()
def lowerCAmelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str ) ->int | None:
"""simple docstring"""
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def lowerCAmelCase ( self : int , UpperCAmelCase_ : str ) ->None:
"""simple docstring"""
snake_case_ = 0
for character in keyword:
snake_case_ = self.find_next_state(UpperCAmelCase_ , UpperCAmelCase_ )
if next_state is None:
self.adlist.append(
{
"""value""": character,
"""next_states""": [],
"""fail_state""": 0,
"""output""": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
snake_case_ = len(self.adlist ) - 1
else:
snake_case_ = next_state
self.adlist[current_state]["output"].append(UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->None:
"""simple docstring"""
snake_case_ = deque()
for node in self.adlist[0]["next_states"]:
q.append(UpperCAmelCase_ )
snake_case_ = 0
while q:
snake_case_ = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(UpperCAmelCase_ )
snake_case_ = self.adlist[r]["""fail_state"""]
while (
self.find_next_state(UpperCAmelCase_ , self.adlist[child]["""value"""] ) is None
and state != 0
):
snake_case_ = self.adlist[state]["""fail_state"""]
snake_case_ = self.find_next_state(
UpperCAmelCase_ , self.adlist[child]["""value"""] )
if self.adlist[child]["fail_state"] is None:
snake_case_ = 0
snake_case_ = (
self.adlist[child]["""output"""]
+ self.adlist[self.adlist[child]["""fail_state"""]]["""output"""]
)
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : str ) ->dict[str, list[int]]:
"""simple docstring"""
snake_case_ = {} # returns a dict with keywords and list of its occurrences
snake_case_ = 0
for i in range(len(UpperCAmelCase_ ) ):
while (
self.find_next_state(UpperCAmelCase_ , string[i] ) is None
and current_state != 0
):
snake_case_ = self.adlist[current_state]["""fail_state"""]
snake_case_ = self.find_next_state(UpperCAmelCase_ , string[i] )
if next_state is None:
snake_case_ = 0
else:
snake_case_ = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
snake_case_ = []
result[key].append(i - len(UpperCAmelCase_ ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
__SCREAMING_SNAKE_CASE : List[Any] = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def _a ( _SCREAMING_SNAKE_CASE = "mumbai" ) -> Generator[tuple[str, str], None, None]:
snake_case_ = BeautifulSoup(requests.get(url + location ).content , """html.parser""" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
snake_case_ = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
snake_case_ = job.find("""span""" , {"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""") | 707 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Dict=[10, 20, 30, 40] , UpperCAmelCase_ : List[Any]=[2, 2, 3, 2] , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : int=["stage2", "stage3", "stage4"] , UpperCAmelCase_ : Optional[int]=[2, 3, 4] , UpperCAmelCase_ : List[str]=None , ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = num_channels
snake_case_ = num_stages
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = num_labels
snake_case_ = initializer_range
snake_case_ = out_features
snake_case_ = out_indices
snake_case_ = scope
def lowerCAmelCase ( self : List[str] ) ->str:
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = ConvNextVaModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] ) ->Any:
"""simple docstring"""
snake_case_ = ConvNextVaForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = ConvNextVaBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case_ = None
snake_case_ = ConvNextVaBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
def lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[Any] = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
__lowercase: Union[str, Any] = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
__lowercase: Union[str, Any] = False
__lowercase: Optional[Any] = False
__lowercase: Any = False
__lowercase: Union[str, Any] = False
__lowercase: Dict = False
def lowerCAmelCase ( self : Union[str, Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = ConvNextVaModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_with_labels()
snake_case_ = True
if model_class.__name__ in [
*get_values(UpperCAmelCase_ ),
*get_values(UpperCAmelCase_ ),
]:
continue
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.train()
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ )
snake_case_ = model(**UpperCAmelCase_ ).loss
loss.backward()
def lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_with_labels()
snake_case_ = False
snake_case_ = True
if (
model_class.__name__
in [*get_values(UpperCAmelCase_ ), *get_values(UpperCAmelCase_ )]
or not model_class.supports_gradient_checkpointing
):
continue
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.gradient_checkpointing_enable()
model.train()
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ )
snake_case_ = model(**UpperCAmelCase_ ).loss
loss.backward()
def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str ):
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
snake_case_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case_ = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase_ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@slow
def lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = ConvNextVaModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def _a ( ) -> str:
snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __A (unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
snake_case_ = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(UpperCAmelCase_ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = preprocessor(images=UpperCAmelCase_ , return_tensors="""pt""" ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase_ )
# verify the logits
snake_case_ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
snake_case_ = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 2 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
__SCREAMING_SNAKE_CASE : int = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
@lru_cache()
def _a ( ) -> Dict:
snake_case_ = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
snake_case_ = bs[:]
snake_case_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__lowerCAmelCase )
cs.append(2**8 + n )
n += 1
snake_case_ = [chr(__lowerCAmelCase ) for n in cs]
return dict(zip(__lowerCAmelCase , __lowerCAmelCase ) )
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = set()
snake_case_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case_ = char
return pairs
class __A (snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = VOCAB_FILES_NAMES
__lowercase: List[str] = PRETRAINED_VOCAB_FILES_MAP
__lowercase: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase: Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any]="replace" , UpperCAmelCase_ : int="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : int="<unk>" , UpperCAmelCase_ : List[str]="<pad>" , UpperCAmelCase_ : Union[str, Any]="<mask>" , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : Tuple , ) ->Optional[int]:
"""simple docstring"""
snake_case_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else bos_token
snake_case_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else eos_token
snake_case_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else sep_token
snake_case_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else cls_token
snake_case_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else unk_token
snake_case_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
super().__init__(
errors=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , )
with open(_lowerCAmelCase , encoding="""utf-8""" ) as vocab_handle:
snake_case_ = json.load(_lowerCAmelCase )
snake_case_ = {v: k for k, v in self.encoder.items()}
snake_case_ = errors # how to handle errors in decoding
snake_case_ = bytes_to_unicode()
snake_case_ = {v: k for k, v in self.byte_encoder.items()}
with open(_lowerCAmelCase , encoding="""utf-8""" ) as merges_handle:
snake_case_ = merges_handle.read().split("""\n""" )[1:-1]
snake_case_ = [tuple(merge.split() ) for merge in bpe_merges]
snake_case_ = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
snake_case_ = {}
snake_case_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case_ = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def lowerCAmelCase ( self : Tuple ) ->Tuple:
"""simple docstring"""
return len(self.encoder )
def lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Union[str, Any] ) ->int:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
snake_case_ = tuple(_lowerCAmelCase )
snake_case_ = get_pairs(_lowerCAmelCase )
if not pairs:
return token
while True:
snake_case_ = min(_lowerCAmelCase , key=lambda UpperCAmelCase_ : self.bpe_ranks.get(_lowerCAmelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case_ , snake_case_ = bigram
snake_case_ = []
snake_case_ = 0
while i < len(_lowerCAmelCase ):
try:
snake_case_ = word.index(_lowerCAmelCase , _lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case_ = j
if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case_ = tuple(_lowerCAmelCase )
snake_case_ = new_word
if len(_lowerCAmelCase ) == 1:
break
else:
snake_case_ = get_pairs(_lowerCAmelCase )
snake_case_ = """ """.join(_lowerCAmelCase )
snake_case_ = word
return word
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : int ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = []
for token in re.findall(self.pat , _lowerCAmelCase ):
snake_case_ = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCAmelCase ).split(""" """ ) )
return bpe_tokens
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Tuple ) ->List[Any]:
"""simple docstring"""
return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : int , UpperCAmelCase_ : int ) ->List[str]:
"""simple docstring"""
return self.decoder.get(_lowerCAmelCase )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[str] ) ->Tuple:
"""simple docstring"""
snake_case_ = """""".join(_lowerCAmelCase )
snake_case_ = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple = None ) ->Dict:
"""simple docstring"""
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case_ = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case_ = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + """\n""" )
snake_case_ = 0
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase_ : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
snake_case_ = token_index
writer.write(""" """.join(_lowerCAmelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] = None ) ->Optional[Any]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] = None , UpperCAmelCase_ : List[str] = False ) ->List[Any]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] = None ) ->Tuple:
"""simple docstring"""
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any]=False , **UpperCAmelCase_ : Optional[int] ) ->str:
"""simple docstring"""
snake_case_ = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowerCAmelCase ) > 0 and not text[0].isspace()):
snake_case_ = """ """ + text
return (text, kwargs)
| 708 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = ['model.decoder.embed_positions.weights']
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
if "emb" in name:
snake_case_ = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
snake_case_ = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
snake_case_ = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
snake_case_ = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
snake_case_ = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
snake_case_ = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
snake_case_ = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
snake_case_ = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
snake_case_ = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
snake_case_ = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
snake_case_ = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple[Dict, Dict]:
snake_case_ = list(state_dict.keys() )
snake_case_ = {}
for key in keys:
snake_case_ = state_dict.pop(_SCREAMING_SNAKE_CASE )
snake_case_ = rename_keys(_SCREAMING_SNAKE_CASE )
if "in_proj_weight" in key:
# split fused qkv proj
snake_case_ = val[:hidden_size, :]
snake_case_ = val[hidden_size : 2 * hidden_size, :]
snake_case_ = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
snake_case_ = val
else:
snake_case_ = val
return state_dict, enc_dec_proj_state_dict
def _a ( _SCREAMING_SNAKE_CASE ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
snake_case_ = 1_024
snake_case_ = 24
snake_case_ = 16
elif checkpoint == "medium":
snake_case_ = 1_536
snake_case_ = 48
snake_case_ = 24
elif checkpoint == "large":
snake_case_ = 2_048
snake_case_ = 48
snake_case_ = 32
else:
raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
snake_case_ = MusicgenDecoderConfig(
hidden_size=_SCREAMING_SNAKE_CASE , ffn_dim=hidden_size * 4 , num_hidden_layers=_SCREAMING_SNAKE_CASE , num_attention_heads=_SCREAMING_SNAKE_CASE , )
return config
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="cpu" ) -> Tuple:
snake_case_ = MusicGen.get_pretrained(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
snake_case_ = decoder_config_from_checkpoint(_SCREAMING_SNAKE_CASE )
snake_case_ = fairseq_model.lm.state_dict()
snake_case_ , snake_case_ = rename_state_dict(
_SCREAMING_SNAKE_CASE , hidden_size=decoder_config.hidden_size )
snake_case_ = TaEncoderModel.from_pretrained("""t5-base""" )
snake_case_ = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
snake_case_ = MusicgenForCausalLM(_SCREAMING_SNAKE_CASE ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
snake_case_ , snake_case_ = decoder.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
snake_case_ = MusicgenForConditionalGeneration(text_encoder=_SCREAMING_SNAKE_CASE , audio_encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_SCREAMING_SNAKE_CASE )
# check we can do a forward pass
snake_case_ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
snake_case_ = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
snake_case_ = model(input_ids=_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
snake_case_ = AutoTokenizer.from_pretrained("""t5-base""" )
snake_case_ = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
snake_case_ = MusicgenProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
# set the appropriate bos/pad token ids
snake_case_ = 2_048
snake_case_ = 2_048
# set other default generation config params
snake_case_ = int(30 * audio_encoder.config.frame_rate )
snake_case_ = True
snake_case_ = 3.0
if pytorch_dump_folder is not None:
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if repo_id:
logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
processor.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 2 | 0 |
"""simple docstring"""
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 709 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
if index == number_of_items:
return 0
snake_case_ = 0
snake_case_ = 0
snake_case_ = knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 )
if weights[index] <= max_weight:
snake_case_ = values[index] + knapsack(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_weight - weights[index] , index + 1 )
return max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__SCREAMING_SNAKE_CASE : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
__SCREAMING_SNAKE_CASE : List[Any] = {'facebook/blenderbot-3B': 128}
class __A (snake_case__):
'''simple docstring'''
__lowercase: List[str] = VOCAB_FILES_NAMES
__lowercase: Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__lowercase: int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase: Any = ["""input_ids""", """attention_mask"""]
__lowercase: Optional[Any] = BlenderbotTokenizer
def __init__( self : Dict , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[Any]="replace" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : str="</s>" , UpperCAmelCase_ : List[str]="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : Union[str, Any]="<unk>" , UpperCAmelCase_ : Optional[Any]="<pad>" , UpperCAmelCase_ : Dict="<mask>" , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=True , **UpperCAmelCase_ : Dict , ) ->int:
"""simple docstring"""
super().__init__(
__A , __A , tokenizer_file=__A , errors=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , trim_offsets=__A , **__A , )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __A ) != add_prefix_space:
snake_case_ = getattr(__A , pre_tok_state.pop("""type""" ) )
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**__A )
snake_case_ = add_prefix_space
snake_case_ = "post_processor"
snake_case_ = getattr(self.backend_tokenizer , __A , __A )
if tokenizer_component_instance:
snake_case_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ = tuple(state["""sep"""] )
if "cls" in state:
snake_case_ = tuple(state["""cls"""] )
snake_case_ = False
if state.get("""add_prefix_space""" , __A ) != add_prefix_space:
snake_case_ = add_prefix_space
snake_case_ = True
if state.get("""trim_offsets""" , __A ) != trim_offsets:
snake_case_ = trim_offsets
snake_case_ = True
if changes_to_apply:
snake_case_ = getattr(__A , state.pop("""type""" ) )
snake_case_ = component_class(**__A )
setattr(self.backend_tokenizer , __A , __A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : List[Any] ) ->str:
"""simple docstring"""
snake_case_ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else value
snake_case_ = value
def lowerCAmelCase ( self : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str] ) ->str:
"""simple docstring"""
snake_case_ = kwargs.get("""is_split_into_words""" , __A )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A , **__A )
def lowerCAmelCase ( self : str , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = kwargs.get("""is_split_into_words""" , __A )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A , **__A )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->List[str]:
"""simple docstring"""
snake_case_ = self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[str]:
"""simple docstring"""
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->int:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : "Conversation" ) ->Dict:
"""simple docstring"""
snake_case_ = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
snake_case_ = " ".join(__A )
snake_case_ = self.encode(__A )
if len(__A ) > self.model_max_length:
snake_case_ = input_ids[-self.model_max_length :]
logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| 710 |
"""simple docstring"""
from math import factorial
def _a ( _SCREAMING_SNAKE_CASE = 20 ) -> int:
snake_case_ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
snake_case_ = n // 2
return int(factorial(_SCREAMING_SNAKE_CASE ) / (factorial(_SCREAMING_SNAKE_CASE ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
__SCREAMING_SNAKE_CASE : Optional[int] = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 2 | 0 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A (UpperCamelCase_ , unittest.TestCase):
__lowercase: List[str] = CodeGenTokenizer
__lowercase: Union[str, Any] = CodeGenTokenizerFast
__lowercase: Tuple = True
__lowercase: Optional[int] = {"""add_prefix_space""": True}
__lowercase: int = False
def lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
snake_case_ = dict(zip(_a , range(len(_a ) ) ) )
snake_case_ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case_ = {"""unk_token""": """<unk>"""}
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def lowerCAmelCase ( self : Optional[int] , **UpperCAmelCase_ : str ) ->Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **_a )
def lowerCAmelCase ( self : Optional[int] , **UpperCAmelCase_ : List[str] ) ->Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Dict ) ->List[str]:
"""simple docstring"""
snake_case_ = """lower newer"""
snake_case_ = """lower newer"""
return input_text, output_text
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ = """lower newer"""
snake_case_ = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case_ = tokenizer.tokenize(_a , add_prefix_space=_a )
self.assertListEqual(_a , _a )
snake_case_ = tokens + [tokenizer.unk_token]
snake_case_ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer(add_prefix_space=_a )
snake_case_ = """lower newer"""
# Testing tokenization
snake_case_ = tokenizer.tokenize(_a , add_prefix_space=_a )
snake_case_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
# Testing conversion to ids without special tokens
snake_case_ = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a )
snake_case_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
# Testing conversion to ids with special tokens
snake_case_ = self.get_rust_tokenizer(add_prefix_space=_a )
snake_case_ = tokenizer.encode(_a , add_prefix_space=_a )
snake_case_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# Testing the unknown token
snake_case_ = tokens + [rust_tokenizer.unk_token]
snake_case_ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_a ) , _a )
def lowerCAmelCase ( self : List[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int ) ->Union[str, Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Optional[Any]=15 ) ->Union[str, Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
# Simple input
snake_case_ = """This is a simple input"""
snake_case_ = ["""This is a simple input 1""", """This is a simple input 2"""]
snake_case_ = ("""This is a simple input""", """This is a pair""")
snake_case_ = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding="""max_length""" )
# Simple input
self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding="""max_length""" )
# Simple input
self.assertRaises(
_a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding="""max_length""" , )
# Pair input
self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding="""max_length""" )
# Pair input
self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding="""max_length""" )
# Pair input
self.assertRaises(
_a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding="""max_length""" , )
def lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
snake_case_ = """This is a simple input"""
snake_case_ = ["""This is a simple input looooooooong""", """This is a simple input"""]
snake_case_ = ("""This is a simple input""", """This is a pair""")
snake_case_ = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
snake_case_ = tokenizer.pad_token_id
snake_case_ = tokenizer(_a , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
snake_case_ = tokenizer(_a , padding=_a , truncate=_a , return_tensors="""np""" )
snake_case_ = tokenizer(*_a , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
snake_case_ = tokenizer(_a , padding=_a , truncate=_a , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def lowerCAmelCase ( self : Union[str, Any] ) ->str:
"""simple docstring"""
snake_case_ = """$$$"""
snake_case_ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=_a , add_bos_token=_a )
snake_case_ = """This is a simple input"""
snake_case_ = ["""This is a simple input 1""", """This is a simple input 2"""]
snake_case_ = tokenizer.bos_token_id
snake_case_ = tokenizer(_a )
snake_case_ = tokenizer(_a )
self.assertEqual(out_s.input_ids[0] , _a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
snake_case_ = tokenizer.decode(out_s.input_ids )
snake_case_ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , _a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowerCAmelCase ( self : Tuple ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
snake_case_ = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
snake_case_ = """\nif len_a > len_b: result = a\nelse: result = b"""
snake_case_ = tokenizer.encode(_a )
snake_case_ = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
snake_case_ = tokenizer.decode(_a , truncate_before_pattern=_a )
self.assertEqual(_a , _a )
def lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
pass
| 711 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def _a ( _SCREAMING_SNAKE_CASE = 8 ) -> str:
snake_case_ = ascii_letters + digits + punctuation
return "".join(secrets.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
# Password Generator = full boot with random_number, random_letters, and
# random_character FUNCTIONS
# Put your code here...
i -= len(_SCREAMING_SNAKE_CASE )
snake_case_ = i // 3
snake_case_ = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
snake_case_ = (
chars_incl
+ random(_SCREAMING_SNAKE_CASE , quotient + remainder )
+ random(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
+ random(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
)
snake_case_ = list(_SCREAMING_SNAKE_CASE )
shuffle(_SCREAMING_SNAKE_CASE )
return "".join(_SCREAMING_SNAKE_CASE )
# random is a generalised function for letters, characters and numbers
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
return "".join(secrets.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 8 ) -> bool:
if len(_SCREAMING_SNAKE_CASE ) < min_length:
# Your Password must be at least 8 characters long
return False
snake_case_ = any(char in ascii_uppercase for char in password )
snake_case_ = any(char in ascii_lowercase for char in password )
snake_case_ = any(char in digits for char in password )
snake_case_ = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def _a ( ) -> str:
snake_case_ = int(input("""Please indicate the max length of your password: """ ).strip() )
snake_case_ = input(
"""Please indicate the characters that must be in your password: """ ).strip()
print("""Password generated:""" , password_generator(_SCREAMING_SNAKE_CASE ) )
print(
"""Alternative Password generated:""" , alternative_password_generator(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , )
print("""[If you are thinking of using this passsword, You better save it.]""" )
if __name__ == "__main__":
main()
| 2 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__SCREAMING_SNAKE_CASE : str = '0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Any:
if rng is None:
snake_case_ = random.Random()
snake_case_ = 1
for dim in shape:
total_dims *= dim
snake_case_ = []
for _ in range(__UpperCAmelCase ):
values.append(rng.randint(0 , vocab_size - 1 ) )
snake_case_ = np.array(__UpperCAmelCase , dtype=jnp.intaa ).reshape(__UpperCAmelCase )
return output
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Dict:
snake_case_ = ids_tensor(__UpperCAmelCase , vocab_size=2 , rng=__UpperCAmelCase )
# make sure that at least one token is attended to for each batch
snake_case_ = 1
return attn_mask
@require_flax
class __A :
'''simple docstring'''
__lowercase: int = None
__lowercase: Dict = ()
def lowerCAmelCase ( self : Optional[Any] ) ->Optional[int]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
snake_case_ = 2
snake_case_ = inputs["""input_ids"""].shape[-1] // 2
snake_case_ = inputs["""input_ids"""][:max_batch_size, :sequence_length]
snake_case_ = jnp.ones_like(UpperCAmelCase_ )
snake_case_ = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
snake_case_ = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
snake_case_ = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config()
snake_case_ = False
snake_case_ = max_length
snake_case_ = 0
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = pt_model_class(UpperCAmelCase_ ).eval()
snake_case_ = load_flax_weights_in_pytorch_model(UpperCAmelCase_ , flax_model.params )
snake_case_ = flax_model.generate(UpperCAmelCase_ ).sequences
snake_case_ = pt_model.generate(torch.tensor(UpperCAmelCase_ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
snake_case_ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def lowerCAmelCase ( self : List[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config()
snake_case_ = False
snake_case_ = max_length
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = model.generate(UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase_ )
snake_case_ = jit(model.generate )
snake_case_ = jit_generate(UpperCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config()
snake_case_ = True
snake_case_ = max_length
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = model.generate(UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase_ )
snake_case_ = jit(model.generate )
snake_case_ = jit_generate(UpperCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config()
snake_case_ = False
snake_case_ = max_length
snake_case_ = 2
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = model.generate(UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase_ )
snake_case_ = jit(model.generate )
snake_case_ = jit_generate(UpperCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowerCAmelCase ( self : List[Any] ) ->Any:
"""simple docstring"""
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config()
snake_case_ = False
snake_case_ = max_length
snake_case_ = 2
snake_case_ = 2
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = model.generate(UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def lowerCAmelCase ( self : Any ) ->Tuple:
"""simple docstring"""
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config()
snake_case_ = True
snake_case_ = max_length
snake_case_ = 0.8
snake_case_ = 10
snake_case_ = 0.3
snake_case_ = 1
snake_case_ = 8
snake_case_ = 9
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = model.generate(UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase_ )
snake_case_ = jit(model.generate )
snake_case_ = jit_generate(UpperCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config()
snake_case_ = max_length
snake_case_ = 1
snake_case_ = 8
snake_case_ = 9
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = model.generate(UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase_ )
snake_case_ = jit(model.generate )
snake_case_ = jit_generate(UpperCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config()
snake_case_ = max_length
snake_case_ = 2
snake_case_ = 1
snake_case_ = 8
snake_case_ = 9
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = model.generate(UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase_ )
snake_case_ = jit(model.generate )
snake_case_ = jit_generate(UpperCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case_ = attention_mask.at[(0, 0)].set(0 )
snake_case_ = False
snake_case_ = max_length
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = model.generate(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase_ )
snake_case_ = jit(model.generate )
snake_case_ = jit_generate(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case_ = attention_mask.at[(0, 0)].set(0 )
snake_case_ = True
snake_case_ = max_length
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = model.generate(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase_ )
snake_case_ = jit(model.generate )
snake_case_ = jit_generate(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowerCAmelCase ( self : str ) ->List[str]:
"""simple docstring"""
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case_ = attention_mask.at[(0, 0)].set(0 )
snake_case_ = 2
snake_case_ = max_length
for model_class in self.all_generative_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = model.generate(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase_ )
snake_case_ = jit(model.generate )
snake_case_ = jit_generate(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : List[Any] ) ->int:
"""simple docstring"""
snake_case_ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
snake_case_ = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
snake_case_ = """Hello world"""
snake_case_ = tokenizer(UpperCAmelCase_ , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(UpperCAmelCase_ , """do_samples""" ):
model.generate(UpperCAmelCase_ , do_samples=UpperCAmelCase_ )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(UpperCAmelCase_ , """foo""" ):
snake_case_ = {"""foo""": """bar"""}
model.generate(UpperCAmelCase_ , **UpperCAmelCase_ )
| 712 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class __A (unittest.TestCase):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple=7 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=18 , UpperCAmelCase_ : Optional[Any]=30 , UpperCAmelCase_ : str=400 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Optional[Any]=True , ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = size if size is not None else {"""height""": 18, """width""": 18}
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = num_channels
snake_case_ = image_size
snake_case_ = min_resolution
snake_case_ = max_resolution
snake_case_ = do_resize
snake_case_ = size
snake_case_ = do_normalize
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: List[Any] = ImageGPTImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : Optional[int] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = ImageGPTImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Tuple ) ->List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , """clusters""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_normalize""" ) )
def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
snake_case_ = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase_ , obj[key] ) )
else:
self.assertEqual(obj[key] , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ = os.path.join(UpperCAmelCase_ , """image_processor.json""" )
image_processor_first.to_json_file(UpperCAmelCase_ )
snake_case_ = self.image_processing_class.from_json_file(UpperCAmelCase_ ).to_dict()
snake_case_ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(UpperCAmelCase_ )
snake_case_ = self.image_processing_class.from_pretrained(UpperCAmelCase_ ).to_dict()
snake_case_ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , UpperCAmelCase_ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def lowerCAmelCase ( self : List[Any] ) ->Tuple:
"""simple docstring"""
pass
def _a ( ) -> str:
snake_case_ = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
snake_case_ = Image.open(dataset[4]["""file"""] )
snake_case_ = Image.open(dataset[5]["""file"""] )
snake_case_ = [imagea, imagea]
return images
@require_vision
@require_torch
class __A (unittest.TestCase):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
snake_case_ = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
snake_case_ = prepare_images()
# test non-batched
snake_case_ = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_024) )
snake_case_ = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , UpperCAmelCase_ )
# test batched
snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_024) )
snake_case_ = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , UpperCAmelCase_ )
| 2 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
snake_case_ = 1
snake_case_ = 3
snake_case_ = (32, 32)
snake_case_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_UpperCAmelCase )
return image
@property
def lowerCAmelCase ( self : Dict ) ->Dict:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def lowerCAmelCase ( self : Optional[int] ) ->Tuple:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(_UpperCAmelCase )
@property
def lowerCAmelCase ( self : str ) ->str:
"""simple docstring"""
def extract(*UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ):
class __A :
'''simple docstring'''
def __init__( self : Optional[Any] ) ->str:
"""simple docstring"""
snake_case_ = torch.ones([0] )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[int] ) ->List[Any]:
"""simple docstring"""
self.pixel_values.to(_UpperCAmelCase )
return self
return Out()
return extract
def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.dummy_cond_unet
snake_case_ = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
snake_case_ = self.dummy_vae
snake_case_ = self.dummy_text_encoder
snake_case_ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
snake_case_ = 77
snake_case_ = self.dummy_image.to(_UpperCAmelCase )
snake_case_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
snake_case_ = AltDiffusionImgaImgPipeline(
unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=self.dummy_extractor , )
snake_case_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_UpperCAmelCase )
snake_case_ = alt_pipe.to(_UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
snake_case_ = """A painting of a squirrel eating a burger"""
snake_case_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
snake_case_ = alt_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=_UpperCAmelCase , )
snake_case_ = output.images
snake_case_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
snake_case_ = alt_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=_UpperCAmelCase , return_dict=_UpperCAmelCase , )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def lowerCAmelCase ( self : List[str] ) ->str:
"""simple docstring"""
snake_case_ = self.dummy_cond_unet
snake_case_ = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
snake_case_ = self.dummy_vae
snake_case_ = self.dummy_text_encoder
snake_case_ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
snake_case_ = 77
snake_case_ = self.dummy_image.to(_UpperCAmelCase )
# put models in fp16
snake_case_ = unet.half()
snake_case_ = vae.half()
snake_case_ = bert.half()
# make sure here that pndm scheduler skips prk
snake_case_ = AltDiffusionImgaImgPipeline(
unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=self.dummy_extractor , )
snake_case_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_UpperCAmelCase )
snake_case_ = alt_pipe.to(_UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
snake_case_ = """A painting of a squirrel eating a burger"""
snake_case_ = torch.manual_seed(0 )
snake_case_ = alt_pipe(
[prompt] , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , image=_UpperCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def lowerCAmelCase ( self : Union[str, Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case_ = init_image.resize((760, 504) )
snake_case_ = """BAAI/AltDiffusion"""
snake_case_ = AltDiffusionImgaImgPipeline.from_pretrained(
_UpperCAmelCase , safety_checker=_UpperCAmelCase , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
snake_case_ = """A fantasy landscape, trending on artstation"""
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_UpperCAmelCase , output_type="""np""" , )
snake_case_ = output.images[0]
snake_case_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
snake_case_ = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Optional[int] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
snake_case_ = init_image.resize((768, 512) )
snake_case_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
snake_case_ = """BAAI/AltDiffusion"""
snake_case_ = AltDiffusionImgaImgPipeline.from_pretrained(
_UpperCAmelCase , safety_checker=_UpperCAmelCase , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
snake_case_ = """A fantasy landscape, trending on artstation"""
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_UpperCAmelCase , output_type="""np""" , )
snake_case_ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 713 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __A :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any]=13 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[str]=99 , UpperCAmelCase_ : Dict=24 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Optional[Any]=6 , UpperCAmelCase_ : int=37 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Any=512 , UpperCAmelCase_ : str=16 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Any=1_000 , ) ->Tuple:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = scope
snake_case_ = range_bbox
def lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ = bbox[i, j, 3]
snake_case_ = bbox[i, j, 1]
snake_case_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ = bbox[i, j, 2]
snake_case_ = bbox[i, j, 0]
snake_case_ = t
snake_case_ = None
if self.use_input_mask:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] , ) ->str:
"""simple docstring"""
snake_case_ = LiltModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , ) ->Dict:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = LiltForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , ) ->Dict:
"""simple docstring"""
snake_case_ = LiltForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) = config_and_inputs
snake_case_ = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class __A (snake_case__ , snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[int] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowercase: Optional[Any] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase: Union[str, Any] = False
__lowercase: List[str] = False
def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ) ->Optional[int]:
"""simple docstring"""
return True
def lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = LiltModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : List[str] ) ->int:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = LiltModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@require_torch
@slow
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(UpperCAmelCase_ )
snake_case_ = torch.tensor([[1, 2]] , device=UpperCAmelCase_ )
snake_case_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=UpperCAmelCase_ )
# forward pass
with torch.no_grad():
snake_case_ = model(input_ids=UpperCAmelCase_ , bbox=UpperCAmelCase_ )
snake_case_ = torch.Size([1, 2, 768] )
snake_case_ = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=UpperCAmelCase_ , )
self.assertTrue(outputs.last_hidden_state.shape , UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , UpperCAmelCase_ , atol=1E-3 ) )
| 2 | 0 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__SCREAMING_SNAKE_CASE : List[Any] = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
if args.student_type == "roberta":
snake_case_ = False
elif args.student_type == "gpt2":
snake_case_ = False
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
if args.student_type == "roberta":
snake_case_ = False
def _a ( ) -> Union[str, Any]:
snake_case_ = argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""" , type=__A , required=__A , help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""" , type=__A , required=__A , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=__A , choices=["""distilbert""", """roberta""", """gpt2"""] , required=__A , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=__A , required=__A , help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""" , default=__A , type=__A , help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=__A , help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""" , type=__A , required=__A , help="""The teacher model.""" )
parser.add_argument("""--temperature""" , default=2.0 , type=__A , help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=__A , help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=__A , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=__A , help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""" , default=0.0 , type=__A , help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=__A , help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=__A , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=__A , help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""" , default=0.1 , type=__A , help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""" , default=0.1 , type=__A , help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=__A , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=__A , help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=__A , default=3 , help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""" , type=__A , default=5 , help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__A , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=__A , help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""" , default=0.0 , type=__A , help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""" , default=5E-4 , type=__A , help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=__A , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=__A , help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""" , default=0.02 , type=__A , help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=__A , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=__A , default=1 , help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""" , type=__A , default=-1 , help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""" , type=__A , default=56 , help="""Random seed""" )
parser.add_argument("""--log_interval""" , type=__A , default=500 , help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""" , type=__A , default=4_000 , help="""Checkpoint interval.""" )
snake_case_ = parser.parse_args()
sanity_checks(__A )
# ARGS #
init_gpu_params(__A )
set_seed(__A )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
""" itUse `--force` if you want to overwrite it""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
json.dump(vars(__A ) , __A , indent=4 )
git_log(args.dump_path )
snake_case_ = MODEL_CLASSES[args.student_type]
snake_case_ = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
snake_case_ = teacher_tokenizer_class.from_pretrained(args.teacher_name )
snake_case_ = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
snake_case_ = tokenizer.all_special_tokens.index(__A )
snake_case_ = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
snake_case_ = special_tok_ids
snake_case_ = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , """rb""" ) as fp:
snake_case_ = pickle.load(__A )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , """rb""" ) as fp:
snake_case_ = pickle.load(__A )
snake_case_ = np.maximum(__A , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
snake_case_ = 0.0 # do not predict special tokens
snake_case_ = torch.from_numpy(__A )
else:
snake_case_ = None
snake_case_ = LmSeqsDataset(params=__A , data=__A )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
snake_case_ = student_config_class.from_pretrained(args.student_config )
snake_case_ = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
snake_case_ = student_model_class.from_pretrained(args.student_pretrained_weights , config=__A )
else:
snake_case_ = student_model_class(__A )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info("""Student loaded.""" )
# TEACHER #
snake_case_ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__A )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(__A , __A )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(__A , __A )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
snake_case_ = Distiller(
params=__A , dataset=__A , token_probs=__A , student=__A , teacher=__A )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
| 714 |
"""simple docstring"""
from __future__ import annotations
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[int]:
snake_case_ = 0
snake_case_ = len(_SCREAMING_SNAKE_CASE ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
snake_case_ = i + 1
else:
snake_case_ = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 2 | 0 |
"""simple docstring"""
from __future__ import annotations
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
create_state_space_tree(_lowerCamelCase , [] , 0 , [0 for i in range(len(_lowerCamelCase ) )] )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> int:
if index == len(_lowerCamelCase ):
print(_lowerCamelCase )
return
for i in range(len(_lowerCamelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
snake_case_ = True
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , index + 1 , _lowerCamelCase )
current_sequence.pop()
snake_case_ = False
__SCREAMING_SNAKE_CASE : List[Any] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__SCREAMING_SNAKE_CASE : List[Any] = ['A', 'B', 'C']
generate_all_permutations(sequence_a)
| 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 2 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
__SCREAMING_SNAKE_CASE : Tuple = 1.0_54_57_18_17E-34 # unit of ℏ : J * s
__SCREAMING_SNAKE_CASE : Dict = 3E8 # unit of c : m * s^-1
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
if (force, area, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if force < 0:
raise ValueError("""Magnitude of force can not be negative""" )
if distance < 0:
raise ValueError("""Distance can not be negative""" )
if area < 0:
raise ValueError("""Area can not be negative""" )
if force == 0:
snake_case_ = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
snake_case_ = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
snake_case_ = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("""One and only one argument must be 0""" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = 'Input must be a string of 8 numbers plus letter'
__SCREAMING_SNAKE_CASE : Dict = 'TRWAGMYFPDXBNJZSQVHLCKE'
def _a ( _SCREAMING_SNAKE_CASE ) -> bool:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ = f"""Expected string as input, found {type(_SCREAMING_SNAKE_CASE ).__name__}"""
raise TypeError(_SCREAMING_SNAKE_CASE )
snake_case_ = spanish_id.replace("""-""" , """""" ).upper()
if len(_SCREAMING_SNAKE_CASE ) != 9:
raise ValueError(_SCREAMING_SNAKE_CASE )
try:
snake_case_ = int(spanish_id_clean[0:8] )
snake_case_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(_SCREAMING_SNAKE_CASE ) from ex
if letter.isdigit():
raise ValueError(_SCREAMING_SNAKE_CASE )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) ->str:
"""simple docstring"""
snake_case_ = 10
def lowerCAmelCase ( self : List[Any] ) ->Any:
"""simple docstring"""
snake_case_ = [1, 2, 3, 4]
snake_case_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(lowerCamelCase_ , self.block_size , 0 ) , lowerCamelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
snake_case_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowerCamelCase_ , self.block_size , 0 ) , lowerCamelCase_ )
def lowerCAmelCase ( self : Tuple ) ->Dict:
"""simple docstring"""
snake_case_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
snake_case_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowerCamelCase_ , self.block_size , 0 ) , lowerCamelCase_ )
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
snake_case_ = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
snake_case_ = process_story(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , [] )
def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = ''''''
snake_case_ = process_story(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , [] )
self.assertEqual(lowerCamelCase_ , [] )
def lowerCAmelCase ( self : Tuple ) ->Tuple:
"""simple docstring"""
snake_case_ = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
snake_case_ = process_story(lowerCamelCase_ )
snake_case_ = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
snake_case_ = ['''It was the best of times.''']
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCAmelCase ( self : Any ) ->Optional[int]:
"""simple docstring"""
snake_case_ = torch.tensor([1, 2, 3, 4] )
snake_case_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(lowerCamelCase_ , 0 ).numpy() , expected.numpy() )
def lowerCAmelCase ( self : Tuple ) ->Tuple:
"""simple docstring"""
snake_case_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
snake_case_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowerCamelCase_ , 23 ).numpy() , expected.numpy() )
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
snake_case_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowerCamelCase_ , 1 ).numpy() , expected.numpy() )
def lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
snake_case_ = 101
snake_case_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
snake_case_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
snake_case_ = compute_token_type_ids(lowerCamelCase_ , lowerCamelCase_ )
np.testing.assert_array_equal(lowerCamelCase_ , lowerCamelCase_ )
| 717 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = {'vocab_file': 'spiece.model'}
__SCREAMING_SNAKE_CASE : List[str] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
__SCREAMING_SNAKE_CASE : List[str] = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
__SCREAMING_SNAKE_CASE : int = '▁'
class __A (snake_case__):
'''simple docstring'''
__lowercase: Optional[Any] = VOCAB_FILES_NAMES
__lowercase: Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__lowercase: Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[Any]="[CLS]" , UpperCAmelCase_ : Any="[SEP]" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : str="[SEP]" , UpperCAmelCase_ : Optional[Any]="<pad>" , UpperCAmelCase_ : Optional[int]="[CLS]" , UpperCAmelCase_ : int="[MASK]" , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Union[str, Any] , ) ->None:
"""simple docstring"""
snake_case_ = (
AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ , normalized=UpperCAmelCase_ )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
else mask_token
)
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase_ , remove_space=UpperCAmelCase_ , keep_accents=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
snake_case_ = do_lower_case
snake_case_ = remove_space
snake_case_ = keep_accents
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase_ )
@property
def lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
return len(self.sp_model )
def lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ) ->List[str]:
"""simple docstring"""
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self : Tuple , UpperCAmelCase_ : Optional[int] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Any ) ->str:
"""simple docstring"""
if self.remove_space:
snake_case_ = """ """.join(inputs.strip().split() )
else:
snake_case_ = inputs
snake_case_ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
snake_case_ = unicodedata.normalize("""NFKD""" , UpperCAmelCase_ )
snake_case_ = """""".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase_ )] )
if self.do_lower_case:
snake_case_ = outputs.lower()
return outputs
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : str ) ->List[str]:
"""simple docstring"""
snake_case_ = self.preprocess_text(UpperCAmelCase_ )
snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
snake_case_ = []
for piece in pieces:
if len(UpperCAmelCase_ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
snake_case_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase_ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
snake_case_ = cur_pieces[1:]
else:
snake_case_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCAmelCase_ )
else:
new_pieces.append(UpperCAmelCase_ )
return new_pieces
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Optional[int] ) ->Dict:
"""simple docstring"""
return self.sp_model.PieceToId(UpperCAmelCase_ )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCAmelCase_ )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Dict ) ->Any:
"""simple docstring"""
snake_case_ = []
snake_case_ = """"""
snake_case_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase_ ) + token
snake_case_ = True
snake_case_ = []
else:
current_sub_tokens.append(UpperCAmelCase_ )
snake_case_ = False
out_string += self.sp_model.decode(UpperCAmelCase_ )
return out_string.strip()
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is not None:
return [1] + ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1]
return [1] + ([0] * len(UpperCAmelCase_ )) + [1]
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case_ = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , """wb""" ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
| 2 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _a ( ) -> Dict:
snake_case_ = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=_A , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=_A , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=_A )
return parser.parse_args()
def _a ( ) -> Optional[int]:
snake_case_ = parse_args()
# Import training_script as a module.
snake_case_ = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
snake_case_ = script_fpath.stem
snake_case_ = importlib.import_module(_A )
# Patch sys.argv
snake_case_ = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 718 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError("""The given input must be positive""" )
# get the generated string sequence
snake_case_ = gray_code_sequence_string(_SCREAMING_SNAKE_CASE )
#
# convert them to integers
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
snake_case_ = int(sequence[i] , 2 )
return sequence
def _a ( _SCREAMING_SNAKE_CASE ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
snake_case_ = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
snake_case_ = gray_code_sequence_string(bit_count - 1 )
snake_case_ = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
snake_case_ = """0""" + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
snake_case_ = """1""" + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
def _a ( _SCREAMING_SNAKE_CASE = 2_000_000 ) -> int:
snake_case_ = [0 for i in range(n + 1 )]
snake_case_ = 1
snake_case_ = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , _SCREAMING_SNAKE_CASE ):
snake_case_ = 1
snake_case_ = 0
for i in range(_SCREAMING_SNAKE_CASE ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
| 719 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 2 | 0 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class __A (SCREAMING_SNAKE_CASE__):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
snake_case_ = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def lowerCAmelCase ( self : Any ) ->Optional[Any]:
"""simple docstring"""
with self.assertRaises(_lowercase ):
snake_case_ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def lowerCAmelCase ( self : Union[str, Any] ) ->str:
"""simple docstring"""
with self.assertRaises(_lowercase ):
snake_case_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
snake_case_ = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def lowerCAmelCase ( self : List[Any] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
snake_case_ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
snake_case_ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
snake_case_ = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def lowerCAmelCase ( self : int ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def lowerCAmelCase ( self : Any ) ->List[str]:
"""simple docstring"""
import PIL.Image
snake_case_ = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=_lowercase ) as mock_cast_to_python_objects:
snake_case_ = pa.array(TypedSequence([{"""path""": None, """bytes""": B"""image_bytes"""}, pil_image] , type=Image() ) )
snake_case_ = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , _lowercase )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ = pa.BufferReader(__UpperCamelCase ) if isinstance(__UpperCamelCase , pa.Buffer ) else pa.memory_map(__UpperCamelCase )
snake_case_ = pa.ipc.open_stream(__UpperCamelCase )
snake_case_ = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
snake_case_ = pa.BufferOutputStream()
snake_case_ = pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _a ( ) -> List[str]:
snake_case_ = pa.BufferOutputStream()
snake_case_ = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=__UpperCamelCase , features=__UpperCamelCase ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
snake_case_ = pa.BufferReader(output.getvalue() )
snake_case_ = pa.ipc.open_stream(__UpperCamelCase )
snake_case_ = f.read_all()
snake_case_ = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__UpperCamelCase )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
snake_case_ = pa.BufferOutputStream()
with ArrowWriter(
stream=__UpperCamelCase , writer_batch_size=__UpperCamelCase , hash_salt="""split_name""" , check_duplicates=__UpperCamelCase , ) as writer:
with pytest.raises(__UpperCamelCase ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
snake_case_ = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = pa.BufferOutputStream()
with ArrowWriter(
stream=__UpperCamelCase , writer_batch_size=__UpperCamelCase , hash_salt="""split_name""" , check_duplicates=__UpperCamelCase , ) as writer:
with pytest.raises(__UpperCamelCase ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 )
snake_case_ = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
snake_case_ = pa.BufferOutputStream()
with ArrowWriter(
stream=__UpperCamelCase , writer_batch_size=__UpperCamelCase , hash_salt="""split_name""" , check_duplicates=__UpperCamelCase , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
snake_case_ = pa.BufferOutputStream()
snake_case_ = pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ = pa.BufferOutputStream()
snake_case_ = pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
snake_case_ = pa.BufferOutputStream()
snake_case_ = pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _a ( ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
snake_case_ = os.path.join(__UpperCamelCase , """test.arrow""" )
with ArrowWriter(path=__UpperCamelCase , schema=pa.schema(__UpperCamelCase ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(__UpperCamelCase , 1 )
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
if pa.types.is_list(__UpperCamelCase ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
if isinstance(lst[0] , __UpperCamelCase ):
change_first_primitive_element_in_list(lst[0] , __UpperCamelCase )
else:
snake_case_ = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = pa.array(TypedSequence(__UpperCamelCase , optimized_int_type=__UpperCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = pa.array(OptimizedTypedSequence(__UpperCamelCase , col=__UpperCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
snake_case_ = copy.deepcopy(__UpperCamelCase )
snake_case_ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__UpperCamelCase , __UpperCamelCase )
snake_case_ = pa.array(OptimizedTypedSequence(__UpperCamelCase , col=__UpperCamelCase ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
snake_case_ = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=__UpperCamelCase ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
snake_case_ = """mock://dataset-train.arrow"""
with ArrowWriter(path=__UpperCamelCase , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(__UpperCamelCase ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__UpperCamelCase )
def _a ( ) -> Optional[Any]:
snake_case_ = pa.BufferOutputStream()
with ParquetWriter(stream=__UpperCamelCase ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
snake_case_ = pa.BufferReader(output.getvalue() )
snake_case_ = pq.read_table(__UpperCamelCase )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
import PIL.Image
snake_case_ = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__UpperCamelCase , format="""png""" )
snake_case_ = pa.BufferOutputStream()
with ParquetWriter(
stream=__UpperCamelCase , features=Features({"""image""": Image()} ) , embed_local_files=__UpperCamelCase ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
snake_case_ = pa.BufferReader(output.getvalue() )
snake_case_ = pq.read_table(__UpperCamelCase )
snake_case_ = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , __UpperCamelCase )
with open(__UpperCamelCase , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _a ( ) -> Union[str, Any]:
snake_case_ = pa.schema([pa.field("""col_1""" , pa.string() , nullable=__UpperCamelCase )] )
snake_case_ = pa.BufferOutputStream()
with ArrowWriter(stream=__UpperCamelCase ) as writer:
writer._build_writer(inferred_schema=__UpperCamelCase )
assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
| 720 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = 'https://openaipublic.azureedge.net/jukebox/models/'
__SCREAMING_SNAKE_CASE : List[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.1.bias""" , """.conv1d_1.bias""" )
elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.1.weight""" , """.conv1d_1.weight""" )
elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.3.bias""" , """.conv1d_2.bias""" )
elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.3.weight""" , """.conv1d_2.weight""" )
if "conditioner_blocks.0." in key:
snake_case_ = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" )
if "prime_prior" in key:
snake_case_ = key.replace("""prime_prior""" , """encoder""" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
snake_case_ = key.replace(""".emb.""" , """.""" )
if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""" , """.codebook""" )
if "y_emb." in key:
return key.replace("""y_emb.""" , """metadata_embedding.""" )
if "x_emb.emb." in key:
snake_case_ = key.replace("""0.x_emb.emb""" , """embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""" , """.layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""" , """_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" , """encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""" , """encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""" , """fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""" , """embed_tokens""" )
return key
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = {}
import re
snake_case_ = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
snake_case_ = re.compile(
r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
snake_case_ = re.compile(
r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
snake_case_ = re.compile(
r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_encoder_block_conv_in.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] )
snake_case_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
snake_case_ = re_encoder_block_conv_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_encoder_block_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_encoder_block_resnet.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] )
snake_case_ = {"""1""": 1, """3""": 2}[groups[-2]]
snake_case_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
snake_case_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
snake_case_ = prefix + resnet_block
snake_case_ = re_encoder_block_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_encoder_block_proj_out.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_encoder_block_proj_out.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
snake_case_ = re_encoder_block_proj_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_decoder_block_conv_out.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] ) - 2
snake_case_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
snake_case_ = re_decoder_block_conv_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_decoder_block_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_decoder_block_resnet.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] ) - 2
snake_case_ = {"""1""": 1, """3""": 2}[groups[-2]]
snake_case_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
snake_case_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
snake_case_ = prefix + resnet_block
snake_case_ = re_decoder_block_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_decoder_block_proj_in.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_decoder_block_proj_in.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
snake_case_ = re_decoder_block_proj_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_prior_cond_conv_out.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[1] ) * 2 + int(groups[2] ) - 2
snake_case_ = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
snake_case_ = re_prior_cond_conv_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_prior_cond_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_prior_cond_resnet.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[1] ) * 2 + int(groups[2] ) - 2
snake_case_ = {"""1""": 1, """3""": 2}[groups[-2]]
snake_case_ = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
snake_case_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
snake_case_ = prefix + resnet_block
snake_case_ = re_prior_cond_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_prior_cond_proj_in.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_prior_cond_proj_in.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
snake_case_ = re_prior_cond_proj_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# keep original key
else:
snake_case_ = original_key
snake_case_ = replace_key(_SCREAMING_SNAKE_CASE )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
# handle missmatched shape
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
snake_case_ = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
snake_case_ = original_key
snake_case_ = original_key
snake_case_ = value
return new_dict
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Optional[int]:
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
snake_case_ = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_SCREAMING_SNAKE_CASE )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_SCREAMING_SNAKE_CASE )
open(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , """wb""" ).write(r.content )
snake_case_ = MODEL_MAPPING[model_name.split("""/""" )[-1]]
snake_case_ = JukeboxConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ = JukeboxModel(_SCREAMING_SNAKE_CASE )
snake_case_ = []
snake_case_ = {}
for i, dict_name in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )["""model"""]
snake_case_ = {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
snake_case_ = old_dic[k]
elif k.endswith(""".w""" ):
snake_case_ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
snake_case_ = old_dic[k]
else:
snake_case_ = old_dic[k]
snake_case_ = """vqvae""" if i == 0 else f"""priors.{3 - i}"""
snake_case_ = fix_jukebox_keys(_SCREAMING_SNAKE_CASE , model.state_dict() , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
weight_dict.append(_SCREAMING_SNAKE_CASE )
snake_case_ = weight_dict.pop(0 )
model.vqvae.load_state_dict(_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , """w""" ) as txtfile:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
return weight_dict
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
__SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 2 | 0 |
"""simple docstring"""
from __future__ import annotations
import bisect
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ) -> Any:
if hi < 0:
snake_case_ = len(snake_case__ )
while lo < hi:
snake_case_ = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
snake_case_ = mid + 1
else:
snake_case_ = mid
return lo
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ) -> Any:
if hi < 0:
snake_case_ = len(snake_case__ )
while lo < hi:
snake_case_ = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
snake_case_ = mid + 1
else:
snake_case_ = mid
return lo
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ) -> Any:
sorted_collection.insert(bisect_left(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) , snake_case__ )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ) -> int:
sorted_collection.insert(bisect_right(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) , snake_case__ )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
snake_case_ = 0
snake_case_ = len(snake_case__ ) - 1
while left <= right:
snake_case_ = left + (right - left) // 2
snake_case_ = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
snake_case_ = midpoint - 1
else:
snake_case_ = midpoint + 1
return None
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
snake_case_ = bisect.bisect_left(snake_case__ , snake_case__ )
if index != len(snake_case__ ) and sorted_collection[index] == item:
return index
return None
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
if right < left:
return None
snake_case_ = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(snake_case__ , snake_case__ , snake_case__ , midpoint - 1 )
else:
return binary_search_by_recursion(snake_case__ , snake_case__ , midpoint + 1 , snake_case__ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = input('Enter numbers separated by comma:\n').strip()
__SCREAMING_SNAKE_CASE : Union[str, Any] = sorted(int(item) for item in user_input.split(','))
__SCREAMING_SNAKE_CASE : Any = int(input('Enter a single number to be found in the list:\n'))
__SCREAMING_SNAKE_CASE : int = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 721 |
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
__SCREAMING_SNAKE_CASE : Union[str, Any] = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
__SCREAMING_SNAKE_CASE : Dict = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
__SCREAMING_SNAKE_CASE : Dict = 'zero2'
__SCREAMING_SNAKE_CASE : List[Any] = 'zero3'
__SCREAMING_SNAKE_CASE : int = [ZEROa, ZEROa]
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
snake_case_ = parameterized.to_safe_name("""_""".join(str(_SCREAMING_SNAKE_CASE ) for x in param.args ) )
return f"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
__SCREAMING_SNAKE_CASE : Dict = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __A (snake_case__):
'''simple docstring'''
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] ) ->Any:
"""simple docstring"""
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] ) ->List[str]:
"""simple docstring"""
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] ) ->Optional[int]:
"""simple docstring"""
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int = 10 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , ) ->List[str]:
"""simple docstring"""
snake_case_ = models[model]
snake_case_ = self.run_trainer(
stage=UpperCAmelCase_ , model_name=UpperCAmelCase_ , eval_steps=UpperCAmelCase_ , num_train_epochs=1 , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
self.do_checks(UpperCAmelCase_ )
return output_dir
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int = 10 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , ) ->List[str]:
"""simple docstring"""
snake_case_ = self.get_auto_remove_tmp_dir("""./xxx""" , after=UpperCAmelCase_ )
snake_case_ = F"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(UpperCAmelCase_ )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
if fpaa:
args.extend(["""--fp16"""] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
snake_case_ = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
snake_case_ = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
snake_case_ = self.get_launcher(UpperCAmelCase_ )
snake_case_ = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(UpperCAmelCase_ , env=self.get_env() )
return output_dir
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Any=False ) ->Tuple:
"""simple docstring"""
snake_case_ = min(2 , get_gpu_count() ) if distributed else 1
return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 2 | 0 |
"""simple docstring"""
import math
def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]:
snake_case_ = []
snake_case_ = 2
snake_case_ = int(math.sqrt(__SCREAMING_SNAKE_CASE ) ) # Size of every segment
snake_case_ = [True] * (end + 1)
snake_case_ = []
while start <= end:
if temp[start] is True:
in_prime.append(__SCREAMING_SNAKE_CASE )
for i in range(start * start , end + 1 , __SCREAMING_SNAKE_CASE ):
snake_case_ = False
start += 1
prime += in_prime
snake_case_ = end + 1
snake_case_ = min(2 * end , __SCREAMING_SNAKE_CASE )
while low <= n:
snake_case_ = [True] * (high - low + 1)
for each in in_prime:
snake_case_ = math.floor(low / each ) * each
if t < low:
t += each
for j in range(__SCREAMING_SNAKE_CASE , high + 1 , __SCREAMING_SNAKE_CASE ):
snake_case_ = False
for j in range(len(__SCREAMING_SNAKE_CASE ) ):
if temp[j] is True:
prime.append(j + low )
snake_case_ = high + 1
snake_case_ = min(high + end , __SCREAMING_SNAKE_CASE )
return prime
print(sieve(10**6))
| 700 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case__)
class __A (snake_case__):
'''simple docstring'''
__lowercase: str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
__lowercase: ClassVar[Features] = Features({"""audio""": Audio()})
__lowercase: ClassVar[Features] = Features({"""transcription""": Value("""string""")})
__lowercase: str = "audio"
__lowercase: str = "transcription"
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Any ) ->int:
"""simple docstring"""
if self.audio_column not in features:
raise ValueError(F"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , UpperCAmelCase_ ):
raise ValueError(F"""Column {self.audio_column} is not an Audio type.""" )
snake_case_ = copy.deepcopy(self )
snake_case_ = self.input_schema.copy()
snake_case_ = features[self.audio_column]
snake_case_ = input_schema
return task_template
@property
def lowerCAmelCase ( self : List[str] ) ->Dict[str, str]:
"""simple docstring"""
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 2 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def _a ( _SCREAMING_SNAKE_CASE ) -> Dict:
snake_case_ = SwinConfig()
snake_case_ = swin_name.split("""_""" )
snake_case_ = name_split[1]
snake_case_ = int(name_split[4] )
snake_case_ = int(name_split[3][-1] )
if model_size == "tiny":
snake_case_ = 96
snake_case_ = (2, 2, 6, 2)
snake_case_ = (3, 6, 12, 24)
elif model_size == "small":
snake_case_ = 96
snake_case_ = (2, 2, 18, 2)
snake_case_ = (3, 6, 12, 24)
elif model_size == "base":
snake_case_ = 128
snake_case_ = (2, 2, 18, 2)
snake_case_ = (4, 8, 16, 32)
else:
snake_case_ = 192
snake_case_ = (2, 2, 18, 2)
snake_case_ = (6, 12, 24, 48)
if "in22k" in swin_name:
snake_case_ = 21_841
else:
snake_case_ = 1_000
snake_case_ = """huggingface/label-files"""
snake_case_ = """imagenet-1k-id2label.json"""
snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
snake_case_ = img_size
snake_case_ = num_classes
snake_case_ = embed_dim
snake_case_ = depths
snake_case_ = num_heads
snake_case_ = window_size
return config
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
if "patch_embed.proj" in name:
snake_case_ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
snake_case_ = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
snake_case_ = """encoder.""" + name
if "attn.proj" in name:
snake_case_ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
snake_case_ = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
snake_case_ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
snake_case_ = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
snake_case_ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
snake_case_ = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "norm.weight":
snake_case_ = """layernorm.weight"""
if name == "norm.bias":
snake_case_ = """layernorm.bias"""
if "head" in name:
snake_case_ = name.replace("""head""" , """classifier""" )
else:
snake_case_ = """swin.""" + name
return name
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
for key in orig_state_dict.copy().keys():
snake_case_ = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
snake_case_ = key.split(""".""" )
snake_case_ = int(key_split[1] )
snake_case_ = int(key_split[3] )
snake_case_ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[
dim : dim * 2, :
]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[
:dim
]
snake_case_ = val[
dim : dim * 2
]
snake_case_ = val[
-dim:
]
else:
snake_case_ = val
return orig_state_dict
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
snake_case_ = get_swin_config(_SCREAMING_SNAKE_CASE )
snake_case_ = SwinForImageClassification(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) )
snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
snake_case_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case_ = timm_model(inputs["""pixel_values"""] )
snake_case_ = model(**_SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
print(f"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 701 |
"""simple docstring"""
from functools import reduce
__SCREAMING_SNAKE_CASE : Tuple = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def _a ( _SCREAMING_SNAKE_CASE = N ) -> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str(int(_SCREAMING_SNAKE_CASE ) * int(_SCREAMING_SNAKE_CASE ) ) , n[i : i + 13] ) )
for i in range(len(_SCREAMING_SNAKE_CASE ) - 12 ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 2 | 0 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class __A :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[Any]=None ) ->Optional[int]:
"""simple docstring"""
snake_case_ = list(poly_a or [0] )[:]
snake_case_ = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
snake_case_ = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
snake_case_ = len(self.polyB )
# Add 0 to make lengths equal a power of 2
snake_case_ = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
snake_case_ = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
snake_case_ = self.__multiply()
def lowerCAmelCase ( self : str , UpperCAmelCase_ : str ) ->int:
"""simple docstring"""
snake_case_ = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
# Corner case
if len(__A ) <= 1:
return dft[0]
#
snake_case_ = self.c_max_length // 2
while next_ncol > 0:
snake_case_ = [[] for i in range(__A )]
snake_case_ = self.root**next_ncol
# First half of next step
snake_case_ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__A ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
snake_case_ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__A ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
snake_case_ = new_dft
snake_case_ = next_ncol // 2
return dft[0]
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.__dft("""A""" )
snake_case_ = self.__dft("""B""" )
snake_case_ = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
snake_case_ = 2
while next_ncol <= self.c_max_length:
snake_case_ = [[] for i in range(__A )]
snake_case_ = self.root ** (next_ncol // 2)
snake_case_ = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
snake_case_ = new_inverse_c
next_ncol *= 2
# Unpack
snake_case_ = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : List[Any] ) ->int:
"""simple docstring"""
snake_case_ = "A = " + " + ".join(
F"""{coef}*x^{i}""" for coef, i in enumerate(self.polyA[: self.len_A] ) )
snake_case_ = "B = " + " + ".join(
F"""{coef}*x^{i}""" for coef, i in enumerate(self.polyB[: self.len_B] ) )
snake_case_ = "A*B = " + " + ".join(
F"""{coef}*x^{i}""" for coef, i in enumerate(self.product ) )
return F"""{a}\n{b}\n{c}"""
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class __A (snake_case__):
'''simple docstring'''
__lowercase: Any = """mctct"""
def __init__( self : Dict , UpperCAmelCase_ : List[Any]=8_065 , UpperCAmelCase_ : Tuple=1_536 , UpperCAmelCase_ : Optional[Any]=36 , UpperCAmelCase_ : int=6_144 , UpperCAmelCase_ : Dict=4 , UpperCAmelCase_ : Any=384 , UpperCAmelCase_ : List[str]=920 , UpperCAmelCase_ : Any=1E-5 , UpperCAmelCase_ : Any=0.3 , UpperCAmelCase_ : Tuple="relu" , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : Dict=0.3 , UpperCAmelCase_ : str=0.3 , UpperCAmelCase_ : Any=1 , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=0.3 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Optional[Any]=(7,) , UpperCAmelCase_ : Optional[Any]=(3,) , UpperCAmelCase_ : List[str]=80 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : List[str]="sum" , UpperCAmelCase_ : Union[str, Any]=False , **UpperCAmelCase_ : Any , ) ->Dict:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = intermediate_size
snake_case_ = num_attention_heads
snake_case_ = attention_head_dim
snake_case_ = max_position_embeddings
snake_case_ = layer_norm_eps
snake_case_ = layerdrop
snake_case_ = hidden_act
snake_case_ = initializer_range
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = pad_token_id
snake_case_ = bos_token_id
snake_case_ = eos_token_id
snake_case_ = conv_glu_dim
snake_case_ = conv_dropout
snake_case_ = num_conv_layers
snake_case_ = input_feat_per_channel
snake_case_ = input_channels
snake_case_ = conv_channels
snake_case_ = ctc_loss_reduction
snake_case_ = ctc_zero_infinity
# prevents config testing fail with exporting to json
snake_case_ = list(UpperCAmelCase_ )
snake_case_ = list(UpperCAmelCase_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
F"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
| 2 | 0 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
while a != 0:
snake_case_ = b % a, a
return b
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
if gcd(_lowerCamelCase , _lowerCamelCase ) != 1:
snake_case_ = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(_lowerCamelCase )
snake_case_ = 1, 0, a
snake_case_ = 0, 1, m
while va != 0:
snake_case_ = ua // va
snake_case_ = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
| 703 |
"""simple docstring"""
from math import factorial
def _a ( _SCREAMING_SNAKE_CASE = 100 ) -> int:
return sum(int(_SCREAMING_SNAKE_CASE ) for x in str(factorial(_SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 2 | 0 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = Dict[str, Any]
__SCREAMING_SNAKE_CASE : List[Any] = List[Prediction]
@add_end_docstrings(__SCREAMING_SNAKE_CASE)
class __A (__SCREAMING_SNAKE_CASE):
'''simple docstring'''
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ) ->Any:
"""simple docstring"""
super().__init__(*_a , **_a )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , """vision""" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def lowerCAmelCase ( self : List[Any] , **UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = {}
if "threshold" in kwargs:
snake_case_ = kwargs["""threshold"""]
return {}, {}, postprocess_kwargs
def __call__( self : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Tuple ) ->int:
"""simple docstring"""
return super().__call__(*_a , **_a )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Any ) ->str:
"""simple docstring"""
snake_case_ = load_image(_a )
snake_case_ = torch.IntTensor([[image.height, image.width]] )
snake_case_ = self.image_processor(images=[image] , return_tensors="""pt""" )
if self.tokenizer is not None:
snake_case_ = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" )
snake_case_ = target_size
return inputs
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ = model_inputs.pop("""target_size""" )
snake_case_ = self.model(**_a )
snake_case_ = outputs.__class__({"""target_size""": target_size, **outputs} )
if self.tokenizer is not None:
snake_case_ = model_inputs["""bbox"""]
return model_outputs
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=0.9 ) ->str:
"""simple docstring"""
snake_case_ = model_outputs["""target_size"""]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
snake_case_ = target_size[0].tolist()
def unnormalize(UpperCAmelCase_ : List[Any] ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1_000),
(height * bbox[1] / 1_000),
(width * bbox[2] / 1_000),
(height * bbox[3] / 1_000),
] ) )
snake_case_ = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
snake_case_ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
snake_case_ = [unnormalize(_a ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
snake_case_ = ["""score""", """label""", """box"""]
snake_case_ = [dict(zip(_a , _a ) ) for vals in zip(scores.tolist() , _a , _a ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
snake_case_ = self.image_processor.post_process_object_detection(_a , _a , _a )
snake_case_ = raw_annotations[0]
snake_case_ = raw_annotation["""scores"""]
snake_case_ = raw_annotation["""labels"""]
snake_case_ = raw_annotation["""boxes"""]
snake_case_ = scores.tolist()
snake_case_ = [self.model.config.idalabel[label.item()] for label in labels]
snake_case_ = [self._get_bounding_box(_a ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
snake_case_ = ["""score""", """label""", """box"""]
snake_case_ = [
dict(zip(_a , _a ) )
for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] )
]
return annotation
def lowerCAmelCase ( self : int , UpperCAmelCase_ : "torch.Tensor" ) ->Tuple:
"""simple docstring"""
if self.framework != "pt":
raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
snake_case_ = box.int().tolist()
snake_case_ = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
| 704 |
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: str = VQModel
__lowercase: Union[str, Any] = """sample"""
@property
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[str]=(32, 32) ) ->Tuple:
"""simple docstring"""
snake_case_ = 4
snake_case_ = 3
snake_case_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
return {"sample": image}
@property
def lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCAmelCase ( self : List[Any] ) ->Any:
"""simple docstring"""
return (3, 32, 32)
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 3,
}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase ( self : List[str] ) ->Dict:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ , snake_case_ = VQModel.from_pretrained("""fusing/vqgan-dummy""" , output_loading_info=UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(UpperCAmelCase_ )
snake_case_ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = VQModel.from_pretrained("""fusing/vqgan-dummy""" )
model.to(UpperCAmelCase_ ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
snake_case_ = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
snake_case_ = image.to(UpperCAmelCase_ )
with torch.no_grad():
snake_case_ = model(UpperCAmelCase_ ).sample
snake_case_ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case_ = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143] )
# fmt: on
self.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
| 2 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__SCREAMING_SNAKE_CASE : List[str] = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = ['OwlViTFeatureExtractor']
__SCREAMING_SNAKE_CASE : int = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 705 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Dict = KandinskyVaaControlnetPipeline
__lowercase: str = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowercase: List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowercase: Union[str, Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__lowercase: Tuple = False
@property
def lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
return self.time_input_dim
@property
def lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
return 100
@property
def lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case_ = UNetaDConditionModel(**UpperCAmelCase_ )
return model
@property
def lowerCAmelCase ( self : Any ) ->Optional[Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
snake_case_ = self.dummy_unet
snake_case_ = self.dummy_movq
snake_case_ = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=UpperCAmelCase_ , )
snake_case_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any]=0 ) ->List[str]:
"""simple docstring"""
snake_case_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
snake_case_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCAmelCase_ )
# create hint
snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
if str(UpperCAmelCase_ ).startswith("""mps""" ):
snake_case_ = torch.manual_seed(UpperCAmelCase_ )
else:
snake_case_ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
snake_case_ = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
snake_case_ = """cpu"""
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**UpperCAmelCase_ )
snake_case_ = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) )
snake_case_ = output.images
snake_case_ = pipe(
**self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array(
[0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
snake_case_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
snake_case_ = torch.from_numpy(np.array(UpperCAmelCase_ ) ).float() / 255.0
snake_case_ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case_ = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCAmelCase_ )
snake_case_ = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
snake_case_ = pipeline.to(UpperCAmelCase_ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = """A robot, 4k photo"""
snake_case_ = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case_ , snake_case_ = pipe_prior(
UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
snake_case_ = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case_ = pipeline(
image_embeds=UpperCAmelCase_ , negative_image_embeds=UpperCAmelCase_ , hint=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=100 , output_type="""np""" , )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
| 2 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__SCREAMING_SNAKE_CASE : Optional[Any] = False
class __A (unittest.TestCase):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
snake_case_ = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
snake_case_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
image=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
snake_case_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 706 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class __A :
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase_ : list[str] ) ->List[Any]:
"""simple docstring"""
snake_case_ = []
self.adlist.append(
{"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} )
for keyword in keywords:
self.add_keyword(UpperCAmelCase_ )
self.set_fail_transitions()
def lowerCAmelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str ) ->int | None:
"""simple docstring"""
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def lowerCAmelCase ( self : int , UpperCAmelCase_ : str ) ->None:
"""simple docstring"""
snake_case_ = 0
for character in keyword:
snake_case_ = self.find_next_state(UpperCAmelCase_ , UpperCAmelCase_ )
if next_state is None:
self.adlist.append(
{
"""value""": character,
"""next_states""": [],
"""fail_state""": 0,
"""output""": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
snake_case_ = len(self.adlist ) - 1
else:
snake_case_ = next_state
self.adlist[current_state]["output"].append(UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->None:
"""simple docstring"""
snake_case_ = deque()
for node in self.adlist[0]["next_states"]:
q.append(UpperCAmelCase_ )
snake_case_ = 0
while q:
snake_case_ = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(UpperCAmelCase_ )
snake_case_ = self.adlist[r]["""fail_state"""]
while (
self.find_next_state(UpperCAmelCase_ , self.adlist[child]["""value"""] ) is None
and state != 0
):
snake_case_ = self.adlist[state]["""fail_state"""]
snake_case_ = self.find_next_state(
UpperCAmelCase_ , self.adlist[child]["""value"""] )
if self.adlist[child]["fail_state"] is None:
snake_case_ = 0
snake_case_ = (
self.adlist[child]["""output"""]
+ self.adlist[self.adlist[child]["""fail_state"""]]["""output"""]
)
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : str ) ->dict[str, list[int]]:
"""simple docstring"""
snake_case_ = {} # returns a dict with keywords and list of its occurrences
snake_case_ = 0
for i in range(len(UpperCAmelCase_ ) ):
while (
self.find_next_state(UpperCAmelCase_ , string[i] ) is None
and current_state != 0
):
snake_case_ = self.adlist[current_state]["""fail_state"""]
snake_case_ = self.find_next_state(UpperCAmelCase_ , string[i] )
if next_state is None:
snake_case_ = 0
else:
snake_case_ = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
snake_case_ = []
result[key].append(i - len(UpperCAmelCase_ ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class __A (_UpperCAmelCase):
'''simple docstring'''
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple ) ->Dict:
"""simple docstring"""
super().__init__(*lowercase_ , **lowercase_ )
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(lowercase_ )
snake_case_ = self.values[key]
def lowerCAmelCase ( self : Tuple ) ->Dict:
"""simple docstring"""
return (
sum(self.charge_factor - len(lowercase_ ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str=None ) ->Dict:
"""simple docstring"""
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(lowercase_ ) == 0
):
return key
return super()._collision_resolution(lowercase_ , lowercase_ ) | 707 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Dict=[10, 20, 30, 40] , UpperCAmelCase_ : List[Any]=[2, 2, 3, 2] , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : int=["stage2", "stage3", "stage4"] , UpperCAmelCase_ : Optional[int]=[2, 3, 4] , UpperCAmelCase_ : List[str]=None , ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = num_channels
snake_case_ = num_stages
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = num_labels
snake_case_ = initializer_range
snake_case_ = out_features
snake_case_ = out_indices
snake_case_ = scope
def lowerCAmelCase ( self : List[str] ) ->str:
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = ConvNextVaModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] ) ->Any:
"""simple docstring"""
snake_case_ = ConvNextVaForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = ConvNextVaBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case_ = None
snake_case_ = ConvNextVaBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
def lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[Any] = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
__lowercase: Union[str, Any] = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
__lowercase: Union[str, Any] = False
__lowercase: Optional[Any] = False
__lowercase: Any = False
__lowercase: Union[str, Any] = False
__lowercase: Dict = False
def lowerCAmelCase ( self : Union[str, Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = ConvNextVaModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_with_labels()
snake_case_ = True
if model_class.__name__ in [
*get_values(UpperCAmelCase_ ),
*get_values(UpperCAmelCase_ ),
]:
continue
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.train()
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ )
snake_case_ = model(**UpperCAmelCase_ ).loss
loss.backward()
def lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_with_labels()
snake_case_ = False
snake_case_ = True
if (
model_class.__name__
in [*get_values(UpperCAmelCase_ ), *get_values(UpperCAmelCase_ )]
or not model_class.supports_gradient_checkpointing
):
continue
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.gradient_checkpointing_enable()
model.train()
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ )
snake_case_ = model(**UpperCAmelCase_ ).loss
loss.backward()
def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str ):
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
snake_case_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case_ = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase_ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@slow
def lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = ConvNextVaModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def _a ( ) -> str:
snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __A (unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
snake_case_ = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(UpperCAmelCase_ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = preprocessor(images=UpperCAmelCase_ , return_tensors="""pt""" ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase_ )
# verify the logits
snake_case_ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
snake_case_ = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 2 | 0 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
__SCREAMING_SNAKE_CASE : int = 100
__SCREAMING_SNAKE_CASE : str = set(range(3, NUM_PRIMES, 2))
primes.add(2)
__SCREAMING_SNAKE_CASE : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def _a ( _SCREAMING_SNAKE_CASE ) -> set[int]:
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
snake_case_ = set()
snake_case_ = 42
snake_case_ = 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def _a ( _SCREAMING_SNAKE_CASE = 5_000 ) -> int | None:
for number_to_partition in range(1 , _lowercase ):
if len(partition(_lowercase ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 708 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = ['model.decoder.embed_positions.weights']
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
if "emb" in name:
snake_case_ = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
snake_case_ = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
snake_case_ = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
snake_case_ = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
snake_case_ = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
snake_case_ = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
snake_case_ = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
snake_case_ = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
snake_case_ = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
snake_case_ = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
snake_case_ = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple[Dict, Dict]:
snake_case_ = list(state_dict.keys() )
snake_case_ = {}
for key in keys:
snake_case_ = state_dict.pop(_SCREAMING_SNAKE_CASE )
snake_case_ = rename_keys(_SCREAMING_SNAKE_CASE )
if "in_proj_weight" in key:
# split fused qkv proj
snake_case_ = val[:hidden_size, :]
snake_case_ = val[hidden_size : 2 * hidden_size, :]
snake_case_ = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
snake_case_ = val
else:
snake_case_ = val
return state_dict, enc_dec_proj_state_dict
def _a ( _SCREAMING_SNAKE_CASE ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
snake_case_ = 1_024
snake_case_ = 24
snake_case_ = 16
elif checkpoint == "medium":
snake_case_ = 1_536
snake_case_ = 48
snake_case_ = 24
elif checkpoint == "large":
snake_case_ = 2_048
snake_case_ = 48
snake_case_ = 32
else:
raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
snake_case_ = MusicgenDecoderConfig(
hidden_size=_SCREAMING_SNAKE_CASE , ffn_dim=hidden_size * 4 , num_hidden_layers=_SCREAMING_SNAKE_CASE , num_attention_heads=_SCREAMING_SNAKE_CASE , )
return config
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="cpu" ) -> Tuple:
snake_case_ = MusicGen.get_pretrained(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
snake_case_ = decoder_config_from_checkpoint(_SCREAMING_SNAKE_CASE )
snake_case_ = fairseq_model.lm.state_dict()
snake_case_ , snake_case_ = rename_state_dict(
_SCREAMING_SNAKE_CASE , hidden_size=decoder_config.hidden_size )
snake_case_ = TaEncoderModel.from_pretrained("""t5-base""" )
snake_case_ = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
snake_case_ = MusicgenForCausalLM(_SCREAMING_SNAKE_CASE ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
snake_case_ , snake_case_ = decoder.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
snake_case_ = MusicgenForConditionalGeneration(text_encoder=_SCREAMING_SNAKE_CASE , audio_encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_SCREAMING_SNAKE_CASE )
# check we can do a forward pass
snake_case_ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
snake_case_ = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
snake_case_ = model(input_ids=_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
snake_case_ = AutoTokenizer.from_pretrained("""t5-base""" )
snake_case_ = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
snake_case_ = MusicgenProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
# set the appropriate bos/pad token ids
snake_case_ = 2_048
snake_case_ = 2_048
# set other default generation config params
snake_case_ = int(30 * audio_encoder.config.frame_rate )
snake_case_ = True
snake_case_ = 3.0
if pytorch_dump_folder is not None:
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if repo_id:
logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
processor.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 2 | 0 |
"""simple docstring"""
from math import isclose, sqrt
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ = point_y / 4 / point_x
snake_case_ = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
snake_case_ = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
snake_case_ = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
snake_case_ = outgoing_gradient**2 + 4
snake_case_ = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
snake_case_ = (point_y - outgoing_gradient * point_x) ** 2 - 100
snake_case_ = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
snake_case_ = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
snake_case_ = x_minus if isclose(UpperCAmelCase__ , UpperCAmelCase__ ) else x_plus
snake_case_ = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def _a ( _SCREAMING_SNAKE_CASE = 1.4 , _SCREAMING_SNAKE_CASE = -9.6 ) -> str:
snake_case_ = 0
snake_case_ = first_x_coord
snake_case_ = first_y_coord
snake_case_ = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
snake_case_ , snake_case_ , snake_case_ = next_point(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f"""{solution() = }""")
| 709 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
if index == number_of_items:
return 0
snake_case_ = 0
snake_case_ = 0
snake_case_ = knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 )
if weights[index] <= max_weight:
snake_case_ = values[index] + knapsack(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_weight - weights[index] , index + 1 )
return max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
"""simple docstring"""
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
| 710 |
"""simple docstring"""
from math import factorial
def _a ( _SCREAMING_SNAKE_CASE = 20 ) -> int:
snake_case_ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
snake_case_ = n // 2
return int(factorial(_SCREAMING_SNAKE_CASE ) / (factorial(_SCREAMING_SNAKE_CASE ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
__SCREAMING_SNAKE_CASE : Optional[int] = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 2 | 0 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
snake_case_ = []
for part_id in partition_order:
snake_case_ = df.where(f"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
for row_idx, row in enumerate(_lowercase ):
expected_row_ids_and_row_dicts.append((f"""{part_id}_{row_idx}""", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _a ( ) -> Dict:
snake_case_ = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
snake_case_ = spark.range(100 ).repartition(1 )
snake_case_ = Spark(_lowercase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _a ( ) -> Union[str, Any]:
snake_case_ = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
snake_case_ = spark.range(10 ).repartition(2 )
snake_case_ = [1, 0]
snake_case_ = _generate_iterable_examples(_lowercase , _lowercase ) # Reverse the partitions.
snake_case_ = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowercase , _lowercase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
snake_case_ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _a ( ) -> Dict:
snake_case_ = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
snake_case_ = spark.range(10 ).repartition(1 )
snake_case_ = SparkExamplesIterable(_lowercase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_lowercase ):
assert row_id == f"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _a ( ) -> Dict:
snake_case_ = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
snake_case_ = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""" ) as generator_mock:
snake_case_ = lambda _SCREAMING_SNAKE_CASE : x.reverse()
snake_case_ = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowercase , [2, 1, 0] )
snake_case_ = SparkExamplesIterable(_lowercase ).shuffle_data_sources(_lowercase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowercase ):
snake_case_ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _a ( ) -> List[Any]:
snake_case_ = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
snake_case_ = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
snake_case_ = SparkExamplesIterable(_lowercase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case_ = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowercase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_lowercase ):
snake_case_ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
snake_case_ = SparkExamplesIterable(_lowercase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case_ = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowercase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_lowercase ):
snake_case_ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _a ( ) -> Union[str, Any]:
snake_case_ = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
snake_case_ = spark.range(100 ).repartition(1 )
snake_case_ = Spark(_lowercase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 711 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def _a ( _SCREAMING_SNAKE_CASE = 8 ) -> str:
snake_case_ = ascii_letters + digits + punctuation
return "".join(secrets.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
# Password Generator = full boot with random_number, random_letters, and
# random_character FUNCTIONS
# Put your code here...
i -= len(_SCREAMING_SNAKE_CASE )
snake_case_ = i // 3
snake_case_ = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
snake_case_ = (
chars_incl
+ random(_SCREAMING_SNAKE_CASE , quotient + remainder )
+ random(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
+ random(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
)
snake_case_ = list(_SCREAMING_SNAKE_CASE )
shuffle(_SCREAMING_SNAKE_CASE )
return "".join(_SCREAMING_SNAKE_CASE )
# random is a generalised function for letters, characters and numbers
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
return "".join(secrets.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 8 ) -> bool:
if len(_SCREAMING_SNAKE_CASE ) < min_length:
# Your Password must be at least 8 characters long
return False
snake_case_ = any(char in ascii_uppercase for char in password )
snake_case_ = any(char in ascii_lowercase for char in password )
snake_case_ = any(char in digits for char in password )
snake_case_ = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def _a ( ) -> str:
snake_case_ = int(input("""Please indicate the max length of your password: """ ).strip() )
snake_case_ = input(
"""Please indicate the characters that must be in your password: """ ).strip()
print("""Password generated:""" , password_generator(_SCREAMING_SNAKE_CASE ) )
print(
"""Alternative Password generated:""" , alternative_password_generator(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , )
print("""[If you are thinking of using this passsword, You better save it.]""" )
if __name__ == "__main__":
main()
| 2 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __A (unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[Any] = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
__lowercase: Dict = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict ) ->int:
"""simple docstring"""
snake_case_ = AudioClassificationPipeline(model=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE )
# test with a raw waveform
snake_case_ = np.zeros((34_000,) )
snake_case_ = np.zeros((14_000,) )
return audio_classifier, [audioa, audio]
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ) ->Optional[int]:
"""simple docstring"""
snake_case_ , snake_case_ = examples
snake_case_ = audio_classifier(_SCREAMING_SNAKE_CASE )
# by default a model is initialized with num_labels=2
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{"""score""": ANY(_SCREAMING_SNAKE_CASE ), """label""": ANY(_SCREAMING_SNAKE_CASE )},
{"""score""": ANY(_SCREAMING_SNAKE_CASE ), """label""": ANY(_SCREAMING_SNAKE_CASE )},
] , )
snake_case_ = audio_classifier(_SCREAMING_SNAKE_CASE , top_k=1 )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{"""score""": ANY(_SCREAMING_SNAKE_CASE ), """label""": ANY(_SCREAMING_SNAKE_CASE )},
] , )
self.run_torchaudio(_SCREAMING_SNAKE_CASE )
@require_torchaudio
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
import datasets
# test with a local file
snake_case_ = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
snake_case_ = dataset[0]["""audio"""]["""array"""]
snake_case_ = audio_classifier(_SCREAMING_SNAKE_CASE )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{"""score""": ANY(_SCREAMING_SNAKE_CASE ), """label""": ANY(_SCREAMING_SNAKE_CASE )},
{"""score""": ANY(_SCREAMING_SNAKE_CASE ), """label""": ANY(_SCREAMING_SNAKE_CASE )},
] , )
@require_torch
def lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
snake_case_ = """anton-l/wav2vec2-random-tiny-classifier"""
snake_case_ = pipeline("""audio-classification""" , model=_SCREAMING_SNAKE_CASE )
snake_case_ = np.ones((8_000,) )
snake_case_ = audio_classifier(_SCREAMING_SNAKE_CASE , top_k=4 )
snake_case_ = [
{"""score""": 0.0_842, """label""": """no"""},
{"""score""": 0.0_838, """label""": """up"""},
{"""score""": 0.0_837, """label""": """go"""},
{"""score""": 0.0_834, """label""": """right"""},
]
snake_case_ = [
{"""score""": 0.0_845, """label""": """stop"""},
{"""score""": 0.0_844, """label""": """on"""},
{"""score""": 0.0_841, """label""": """right"""},
{"""score""": 0.0_834, """label""": """left"""},
]
self.assertIn(nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
snake_case_ = {"""array""": np.ones((8_000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
snake_case_ = audio_classifier(_SCREAMING_SNAKE_CASE , top_k=4 )
self.assertIn(nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def lowerCAmelCase ( self : List[str] ) ->int:
"""simple docstring"""
import datasets
snake_case_ = """superb/wav2vec2-base-superb-ks"""
snake_case_ = pipeline("""audio-classification""" , model=_SCREAMING_SNAKE_CASE )
snake_case_ = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
snake_case_ = np.array(dataset[3]["""speech"""] , dtype=np.floataa )
snake_case_ = audio_classifier(_SCREAMING_SNAKE_CASE , top_k=4 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=3 ) , [
{"""score""": 0.981, """label""": """go"""},
{"""score""": 0.007, """label""": """up"""},
{"""score""": 0.006, """label""": """_unknown_"""},
{"""score""": 0.001, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def lowerCAmelCase ( self : Dict ) ->Dict:
"""simple docstring"""
pass
| 712 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class __A (unittest.TestCase):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple=7 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=18 , UpperCAmelCase_ : Optional[Any]=30 , UpperCAmelCase_ : str=400 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Optional[Any]=True , ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = size if size is not None else {"""height""": 18, """width""": 18}
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = num_channels
snake_case_ = image_size
snake_case_ = min_resolution
snake_case_ = max_resolution
snake_case_ = do_resize
snake_case_ = size
snake_case_ = do_normalize
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: List[Any] = ImageGPTImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : Optional[int] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = ImageGPTImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Tuple ) ->List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , """clusters""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_normalize""" ) )
def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
snake_case_ = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase_ , obj[key] ) )
else:
self.assertEqual(obj[key] , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ = os.path.join(UpperCAmelCase_ , """image_processor.json""" )
image_processor_first.to_json_file(UpperCAmelCase_ )
snake_case_ = self.image_processing_class.from_json_file(UpperCAmelCase_ ).to_dict()
snake_case_ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(UpperCAmelCase_ )
snake_case_ = self.image_processing_class.from_pretrained(UpperCAmelCase_ ).to_dict()
snake_case_ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , UpperCAmelCase_ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def lowerCAmelCase ( self : List[Any] ) ->Tuple:
"""simple docstring"""
pass
def _a ( ) -> str:
snake_case_ = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
snake_case_ = Image.open(dataset[4]["""file"""] )
snake_case_ = Image.open(dataset[5]["""file"""] )
snake_case_ = [imagea, imagea]
return images
@require_vision
@require_torch
class __A (unittest.TestCase):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
snake_case_ = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
snake_case_ = prepare_images()
# test non-batched
snake_case_ = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_024) )
snake_case_ = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , UpperCAmelCase_ )
# test batched
snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_024) )
snake_case_ = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , UpperCAmelCase_ )
| 2 | 0 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 713 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __A :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any]=13 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[str]=99 , UpperCAmelCase_ : Dict=24 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Optional[Any]=6 , UpperCAmelCase_ : int=37 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Any=512 , UpperCAmelCase_ : str=16 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Any=1_000 , ) ->Tuple:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = scope
snake_case_ = range_bbox
def lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ = bbox[i, j, 3]
snake_case_ = bbox[i, j, 1]
snake_case_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ = bbox[i, j, 2]
snake_case_ = bbox[i, j, 0]
snake_case_ = t
snake_case_ = None
if self.use_input_mask:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] , ) ->str:
"""simple docstring"""
snake_case_ = LiltModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , ) ->Dict:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = LiltForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , ) ->Dict:
"""simple docstring"""
snake_case_ = LiltForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) = config_and_inputs
snake_case_ = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class __A (snake_case__ , snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[int] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowercase: Optional[Any] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase: Union[str, Any] = False
__lowercase: List[str] = False
def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ) ->Optional[int]:
"""simple docstring"""
return True
def lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = LiltModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : List[str] ) ->int:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = LiltModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@require_torch
@slow
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(UpperCAmelCase_ )
snake_case_ = torch.tensor([[1, 2]] , device=UpperCAmelCase_ )
snake_case_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=UpperCAmelCase_ )
# forward pass
with torch.no_grad():
snake_case_ = model(input_ids=UpperCAmelCase_ , bbox=UpperCAmelCase_ )
snake_case_ = torch.Size([1, 2, 768] )
snake_case_ = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=UpperCAmelCase_ , )
self.assertTrue(outputs.last_hidden_state.shape , UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , UpperCAmelCase_ , atol=1E-3 ) )
| 2 | 0 |
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __A (lowercase__):
'''simple docstring'''
__lowercase: List[Any] = """"""
__lowercase: str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
__lowercase: str = None # compression type in fsspec. ex: "gzip"
__lowercase: str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : Any , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[dict] = None , **UpperCAmelCase_ : int ) ->Optional[int]:
"""simple docstring"""
super().__init__(self , **__lowercase )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
snake_case_ = fsspec.open(
__lowercase , mode="""rb""" , protocol=__lowercase , compression=self.compression , client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
snake_case_ = os.path.basename(self.file.path.split("""::""" )[0] )
snake_case_ = (
self.compressed_name[: self.compressed_name.rindex(""".""" )]
if """.""" in self.compressed_name
else self.compressed_name
)
snake_case_ = None
@classmethod
def lowerCAmelCase ( cls : Any , UpperCAmelCase_ : List[str] ) ->Any:
"""simple docstring"""
return super()._strip_protocol(__lowercase ).lstrip("""/""" )
def lowerCAmelCase ( self : List[str] ) ->Dict:
"""simple docstring"""
if self.dir_cache is None:
snake_case_ = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
snake_case_ = {f["""name"""]: f}
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : str ) ->int:
"""simple docstring"""
return self.file.open().read()
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : str = "rb" , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=None , **UpperCAmelCase_ : Dict , ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = self._strip_protocol(__lowercase )
if mode != "rb":
raise ValueError(F"""Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'""" )
return self.file.open()
class __A (lowercase__):
'''simple docstring'''
__lowercase: Any = """bz2"""
__lowercase: Dict = """bz2"""
__lowercase: Optional[int] = """.bz2"""
class __A (lowercase__):
'''simple docstring'''
__lowercase: int = """gzip"""
__lowercase: List[str] = """gzip"""
__lowercase: Optional[Any] = """.gz"""
class __A (lowercase__):
'''simple docstring'''
__lowercase: Any = """lz4"""
__lowercase: Union[str, Any] = """lz4"""
__lowercase: List[str] = """.lz4"""
class __A (lowercase__):
'''simple docstring'''
__lowercase: Optional[int] = """xz"""
__lowercase: Any = """xz"""
__lowercase: str = """.xz"""
class __A (lowercase__):
'''simple docstring'''
__lowercase: List[Any] = """zstd"""
__lowercase: List[str] = """zstd"""
__lowercase: Any = """.zst"""
def __init__( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : str = "rb" , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[dict] = None , UpperCAmelCase_ : int = DEFAULT_BLOCK_SIZE , **UpperCAmelCase_ : int , ) ->Any:
"""simple docstring"""
super().__init__(
fo=__lowercase , mode=__lowercase , target_protocol=__lowercase , target_options=__lowercase , block_size=__lowercase , **__lowercase , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
snake_case_ = self.file.__enter__
class __A :
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCAmelCase_ : List[str] ) ->Dict:
"""simple docstring"""
snake_case_ = file_
def __enter__( self : Optional[Any] ) ->Any:
"""simple docstring"""
self._file.__enter__()
return self
def __exit__( self : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any ) ->Dict:
"""simple docstring"""
self._file.__exit__(*__lowercase , **__lowercase )
def __iter__( self : Dict ) ->List[Any]:
"""simple docstring"""
return iter(self._file )
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
return next(self._file )
def __getattr__( self : Union[str, Any] , UpperCAmelCase_ : Tuple ) ->int:
"""simple docstring"""
return getattr(self._file , __lowercase )
def fixed_enter(*UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Dict ):
return WrappedFile(_enter(*__lowercase , **__lowercase ) )
snake_case_ = fixed_enter
| 714 |
"""simple docstring"""
from __future__ import annotations
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[int]:
snake_case_ = 0
snake_case_ = len(_SCREAMING_SNAKE_CASE ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
snake_case_ = i + 1
else:
snake_case_ = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 2 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 2 | 0 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
snake_case_ = sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = 'Input must be a string of 8 numbers plus letter'
__SCREAMING_SNAKE_CASE : Dict = 'TRWAGMYFPDXBNJZSQVHLCKE'
def _a ( _SCREAMING_SNAKE_CASE ) -> bool:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ = f"""Expected string as input, found {type(_SCREAMING_SNAKE_CASE ).__name__}"""
raise TypeError(_SCREAMING_SNAKE_CASE )
snake_case_ = spanish_id.replace("""-""" , """""" ).upper()
if len(_SCREAMING_SNAKE_CASE ) != 9:
raise ValueError(_SCREAMING_SNAKE_CASE )
try:
snake_case_ = int(spanish_id_clean[0:8] )
snake_case_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(_SCREAMING_SNAKE_CASE ) from ex
if letter.isdigit():
raise ValueError(_SCREAMING_SNAKE_CASE )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__SCREAMING_SNAKE_CASE : Tuple = Lock()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(__lowerCAmelCase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
snake_case_ = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
snake_case_ = min(__lowerCAmelCase , __lowerCAmelCase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(__lowerCAmelCase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
snake_case_ = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
snake_case_ = max(__lowerCAmelCase , __lowerCAmelCase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(__lowerCAmelCase )
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = []
snake_case_ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
snake_case_ = Pipe()
snake_case_ = Pipe()
process_array_.append(
Process(
target=__lowerCAmelCase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
snake_case_ = temp_rs
snake_case_ = temp_rr
for i in range(1 , len(__lowerCAmelCase ) - 1 ):
snake_case_ = Pipe()
snake_case_ = Pipe()
process_array_.append(
Process(
target=__lowerCAmelCase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
snake_case_ = temp_rs
snake_case_ = temp_rr
process_array_.append(
Process(
target=__lowerCAmelCase , args=(
len(__lowerCAmelCase ) - 1,
arr[len(__lowerCAmelCase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(__lowerCAmelCase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(__lowerCAmelCase ) ):
snake_case_ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def _a ( ) -> Tuple:
snake_case_ = list(range(10 , 0 , -1 ) )
print("""Initial List""" )
print(*__lowerCAmelCase )
snake_case_ = odd_even_transposition(__lowerCAmelCase )
print("""Sorted List\n""" )
print(*__lowerCAmelCase )
if __name__ == "__main__":
main()
| 717 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = {'vocab_file': 'spiece.model'}
__SCREAMING_SNAKE_CASE : List[str] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
__SCREAMING_SNAKE_CASE : List[str] = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
__SCREAMING_SNAKE_CASE : int = '▁'
class __A (snake_case__):
'''simple docstring'''
__lowercase: Optional[Any] = VOCAB_FILES_NAMES
__lowercase: Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__lowercase: Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[Any]="[CLS]" , UpperCAmelCase_ : Any="[SEP]" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : str="[SEP]" , UpperCAmelCase_ : Optional[Any]="<pad>" , UpperCAmelCase_ : Optional[int]="[CLS]" , UpperCAmelCase_ : int="[MASK]" , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Union[str, Any] , ) ->None:
"""simple docstring"""
snake_case_ = (
AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ , normalized=UpperCAmelCase_ )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
else mask_token
)
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase_ , remove_space=UpperCAmelCase_ , keep_accents=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
snake_case_ = do_lower_case
snake_case_ = remove_space
snake_case_ = keep_accents
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase_ )
@property
def lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
return len(self.sp_model )
def lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ) ->List[str]:
"""simple docstring"""
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self : Tuple , UpperCAmelCase_ : Optional[int] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Any ) ->str:
"""simple docstring"""
if self.remove_space:
snake_case_ = """ """.join(inputs.strip().split() )
else:
snake_case_ = inputs
snake_case_ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
snake_case_ = unicodedata.normalize("""NFKD""" , UpperCAmelCase_ )
snake_case_ = """""".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase_ )] )
if self.do_lower_case:
snake_case_ = outputs.lower()
return outputs
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : str ) ->List[str]:
"""simple docstring"""
snake_case_ = self.preprocess_text(UpperCAmelCase_ )
snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
snake_case_ = []
for piece in pieces:
if len(UpperCAmelCase_ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
snake_case_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase_ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
snake_case_ = cur_pieces[1:]
else:
snake_case_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCAmelCase_ )
else:
new_pieces.append(UpperCAmelCase_ )
return new_pieces
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Optional[int] ) ->Dict:
"""simple docstring"""
return self.sp_model.PieceToId(UpperCAmelCase_ )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCAmelCase_ )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Dict ) ->Any:
"""simple docstring"""
snake_case_ = []
snake_case_ = """"""
snake_case_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase_ ) + token
snake_case_ = True
snake_case_ = []
else:
current_sub_tokens.append(UpperCAmelCase_ )
snake_case_ = False
out_string += self.sp_model.decode(UpperCAmelCase_ )
return out_string.strip()
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is not None:
return [1] + ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1]
return [1] + ([0] * len(UpperCAmelCase_ )) + [1]
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case_ = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , """wb""" ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
| 2 | 0 |
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
__SCREAMING_SNAKE_CASE : Any = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
__SCREAMING_SNAKE_CASE : Dict = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
__SCREAMING_SNAKE_CASE : int = f"""down_blocks.{i}.resnets.{j}."""
__SCREAMING_SNAKE_CASE : str = f"""input_blocks.{3*i + j + 1}.0."""
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
__SCREAMING_SNAKE_CASE : Optional[int] = f"""down_blocks.{i}.attentions.{j}."""
__SCREAMING_SNAKE_CASE : Dict = f"""input_blocks.{3*i + j + 1}.1."""
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
__SCREAMING_SNAKE_CASE : Any = f"""up_blocks.{i}.resnets.{j}."""
__SCREAMING_SNAKE_CASE : Tuple = f"""output_blocks.{3*i + j}.0."""
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
__SCREAMING_SNAKE_CASE : Optional[Any] = f"""up_blocks.{i}.attentions.{j}."""
__SCREAMING_SNAKE_CASE : str = f"""output_blocks.{3*i + j}.1."""
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
__SCREAMING_SNAKE_CASE : Optional[int] = f"""down_blocks.{i}.downsamplers.0.conv."""
__SCREAMING_SNAKE_CASE : Any = f"""input_blocks.{3*(i+1)}.0.op."""
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
__SCREAMING_SNAKE_CASE : Optional[Any] = f"""up_blocks.{i}.upsamplers.0."""
__SCREAMING_SNAKE_CASE : Union[str, Any] = f"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
__SCREAMING_SNAKE_CASE : Dict = 'mid_block.attentions.0.'
__SCREAMING_SNAKE_CASE : List[Any] = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
__SCREAMING_SNAKE_CASE : Union[str, Any] = f"""mid_block.resnets.{j}."""
__SCREAMING_SNAKE_CASE : str = f"""middle_block.{2*j}."""
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
snake_case_ = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
snake_case_ = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
snake_case_ = v.replace(lowercase__ , lowercase__ )
snake_case_ = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
snake_case_ = v.replace(lowercase__ , lowercase__ )
snake_case_ = v
snake_case_ = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
__SCREAMING_SNAKE_CASE : List[Any] = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
__SCREAMING_SNAKE_CASE : List[Any] = f"""encoder.down_blocks.{i}.resnets.{j}."""
__SCREAMING_SNAKE_CASE : Tuple = f"""encoder.down.{i}.block.{j}."""
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
__SCREAMING_SNAKE_CASE : List[Any] = f"""down_blocks.{i}.downsamplers.0."""
__SCREAMING_SNAKE_CASE : List[Any] = f"""down.{i}.downsample."""
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
__SCREAMING_SNAKE_CASE : Optional[Any] = f"""up_blocks.{i}.upsamplers.0."""
__SCREAMING_SNAKE_CASE : str = f"""up.{3-i}.upsample."""
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
__SCREAMING_SNAKE_CASE : str = f"""decoder.up_blocks.{i}.resnets.{j}."""
__SCREAMING_SNAKE_CASE : Dict = f"""decoder.up.{3-i}.block.{j}."""
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
__SCREAMING_SNAKE_CASE : Tuple = f"""mid_block.resnets.{i}."""
__SCREAMING_SNAKE_CASE : Optional[Any] = f"""mid.block_{i+1}."""
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
__SCREAMING_SNAKE_CASE : List[Any] = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
return w.reshape(*w.shape , 1 , 1 )
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
snake_case_ = v.replace(lowercase__ , lowercase__ )
snake_case_ = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
snake_case_ = v.replace(lowercase__ , lowercase__ )
snake_case_ = v
snake_case_ = {v: vae_state_dict[k] for k, v in mapping.items()}
snake_case_ = ["""q""", """k""", """v""", """proj_out"""]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f"""mid.attn_1.{weight_name}.weight""" in k:
print(f"""Reshaping {k} for SD format""" )
snake_case_ = reshape_weight_for_sd(lowercase__ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
__SCREAMING_SNAKE_CASE : Any = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
__SCREAMING_SNAKE_CASE : Any = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
__SCREAMING_SNAKE_CASE : Any = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
__SCREAMING_SNAKE_CASE : Optional[int] = {'q': 0, 'k': 1, 'v': 2}
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = {}
snake_case_ = {}
snake_case_ = {}
for k, v in text_enc_dict.items():
if (
k.endswith(""".self_attn.q_proj.weight""" )
or k.endswith(""".self_attn.k_proj.weight""" )
or k.endswith(""".self_attn.v_proj.weight""" )
):
snake_case_ = k[: -len(""".q_proj.weight""" )]
snake_case_ = k[-len("""q_proj.weight""" )]
if k_pre not in capture_qkv_weight:
snake_case_ = [None, None, None]
snake_case_ = v
continue
if (
k.endswith(""".self_attn.q_proj.bias""" )
or k.endswith(""".self_attn.k_proj.bias""" )
or k.endswith(""".self_attn.v_proj.bias""" )
):
snake_case_ = k[: -len(""".q_proj.bias""" )]
snake_case_ = k[-len("""q_proj.bias""" )]
if k_pre not in capture_qkv_bias:
snake_case_ = [None, None, None]
snake_case_ = v
continue
snake_case_ = textenc_pattern.sub(lambda _SCREAMING_SNAKE_CASE : protected[re.escape(m.group(0 ) )] , lowercase__ )
snake_case_ = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
snake_case_ = textenc_pattern.sub(lambda _SCREAMING_SNAKE_CASE : protected[re.escape(m.group(0 ) )] , lowercase__ )
snake_case_ = torch.cat(lowercase__ )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
snake_case_ = textenc_pattern.sub(lambda _SCREAMING_SNAKE_CASE : protected[re.escape(m.group(0 ) )] , lowercase__ )
snake_case_ = torch.cat(lowercase__ )
return new_state_dict
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
return text_enc_dict
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
__SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
__SCREAMING_SNAKE_CASE : List[str] = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
__SCREAMING_SNAKE_CASE : Optional[int] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
__SCREAMING_SNAKE_CASE : Tuple = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
__SCREAMING_SNAKE_CASE : Optional[int] = load_file(unet_path, device='cpu')
else:
__SCREAMING_SNAKE_CASE : str = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
__SCREAMING_SNAKE_CASE : Tuple = torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
__SCREAMING_SNAKE_CASE : Optional[int] = load_file(vae_path, device='cpu')
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
__SCREAMING_SNAKE_CASE : List[str] = load_file(text_enc_path, device='cpu')
else:
__SCREAMING_SNAKE_CASE : str = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
__SCREAMING_SNAKE_CASE : List[str] = torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
__SCREAMING_SNAKE_CASE : Tuple = convert_unet_state_dict(unet_state_dict)
__SCREAMING_SNAKE_CASE : Optional[Any] = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
__SCREAMING_SNAKE_CASE : int = convert_vae_state_dict(vae_state_dict)
__SCREAMING_SNAKE_CASE : List[Any] = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
__SCREAMING_SNAKE_CASE : str = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
__SCREAMING_SNAKE_CASE : Dict = {'transformer.' + k: v for k, v in text_enc_dict.items()}
__SCREAMING_SNAKE_CASE : str = convert_text_enc_state_dict_vaa(text_enc_dict)
__SCREAMING_SNAKE_CASE : List[str] = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
__SCREAMING_SNAKE_CASE : List[Any] = convert_text_enc_state_dict(text_enc_dict)
__SCREAMING_SNAKE_CASE : List[Any] = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
__SCREAMING_SNAKE_CASE : Tuple = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
__SCREAMING_SNAKE_CASE : List[str] = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
__SCREAMING_SNAKE_CASE : Any = {'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 718 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError("""The given input must be positive""" )
# get the generated string sequence
snake_case_ = gray_code_sequence_string(_SCREAMING_SNAKE_CASE )
#
# convert them to integers
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
snake_case_ = int(sequence[i] , 2 )
return sequence
def _a ( _SCREAMING_SNAKE_CASE ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
snake_case_ = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
snake_case_ = gray_code_sequence_string(bit_count - 1 )
snake_case_ = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
snake_case_ = """0""" + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
snake_case_ = """1""" + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
__SCREAMING_SNAKE_CASE : Tuple = 'facebook/wmt19-en-de'
__SCREAMING_SNAKE_CASE : Any = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
__SCREAMING_SNAKE_CASE : List[Any] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(['Making tiny model'], return_tensors='pt')
__SCREAMING_SNAKE_CASE : List[Any] = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
__SCREAMING_SNAKE_CASE : List[str] = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 719 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 2 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class __A (snake_case__):
'''simple docstring'''
__lowercase: int = """layoutlmv3"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple=50_265 , UpperCAmelCase_ : Union[str, Any]=768 , UpperCAmelCase_ : List[Any]=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Optional[Any]=3_072 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Any=512 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : List[str]=1E-5 , UpperCAmelCase_ : Any=1 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : List[str]=1_024 , UpperCAmelCase_ : str=128 , UpperCAmelCase_ : List[Any]=128 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=128 , UpperCAmelCase_ : str=64 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Union[str, Any]=224 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Any , ) ->Optional[Any]:
"""simple docstring"""
super().__init__(
vocab_size=UpperCAmelCase__ , hidden_size=UpperCAmelCase__ , num_hidden_layers=UpperCAmelCase__ , num_attention_heads=UpperCAmelCase__ , intermediate_size=UpperCAmelCase__ , hidden_act=UpperCAmelCase__ , hidden_dropout_prob=UpperCAmelCase__ , attention_probs_dropout_prob=UpperCAmelCase__ , max_position_embeddings=UpperCAmelCase__ , type_vocab_size=UpperCAmelCase__ , initializer_range=UpperCAmelCase__ , layer_norm_eps=UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
snake_case_ = max_ad_position_embeddings
snake_case_ = coordinate_size
snake_case_ = shape_size
snake_case_ = has_relative_attention_bias
snake_case_ = rel_pos_bins
snake_case_ = max_rel_pos
snake_case_ = has_spatial_attention_bias
snake_case_ = rel_ad_pos_bins
snake_case_ = max_rel_ad_pos
snake_case_ = text_embed
snake_case_ = visual_embed
snake_case_ = input_size
snake_case_ = num_channels
snake_case_ = patch_size
snake_case_ = classifier_dropout
class __A (snake_case__):
'''simple docstring'''
__lowercase: Any = version.parse("""1.12""")
@property
def lowerCAmelCase ( self : Union[str, Any] ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase ( self : List[Any] ) ->float:
"""simple docstring"""
return 1E-5
@property
def lowerCAmelCase ( self : List[str] ) ->int:
"""simple docstring"""
return 12
def lowerCAmelCase ( self : str , UpperCAmelCase_ : "ProcessorMixin" , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional["TensorType"] = None , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 40 , UpperCAmelCase_ : int = 40 , ) ->Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , """apply_ocr""" , UpperCAmelCase__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case_ = compute_effective_axis_dimension(
UpperCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case_ = processor.tokenizer.num_special_tokens_to_add(UpperCAmelCase__ )
snake_case_ = compute_effective_axis_dimension(
UpperCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase__ )
# Generate dummy inputs according to compute batch and sequence
snake_case_ = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
snake_case_ = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
snake_case_ = self._generate_dummy_images(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
snake_case_ = dict(
processor(
UpperCAmelCase__ , text=UpperCAmelCase__ , boxes=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , ) )
return inputs
| 720 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = 'https://openaipublic.azureedge.net/jukebox/models/'
__SCREAMING_SNAKE_CASE : List[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.1.bias""" , """.conv1d_1.bias""" )
elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.1.weight""" , """.conv1d_1.weight""" )
elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.3.bias""" , """.conv1d_2.bias""" )
elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.3.weight""" , """.conv1d_2.weight""" )
if "conditioner_blocks.0." in key:
snake_case_ = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" )
if "prime_prior" in key:
snake_case_ = key.replace("""prime_prior""" , """encoder""" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
snake_case_ = key.replace(""".emb.""" , """.""" )
if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""" , """.codebook""" )
if "y_emb." in key:
return key.replace("""y_emb.""" , """metadata_embedding.""" )
if "x_emb.emb." in key:
snake_case_ = key.replace("""0.x_emb.emb""" , """embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""" , """.layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""" , """_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" , """encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""" , """encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""" , """fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""" , """embed_tokens""" )
return key
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = {}
import re
snake_case_ = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
snake_case_ = re.compile(
r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
snake_case_ = re.compile(
r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
snake_case_ = re.compile(
r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_encoder_block_conv_in.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] )
snake_case_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
snake_case_ = re_encoder_block_conv_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_encoder_block_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_encoder_block_resnet.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] )
snake_case_ = {"""1""": 1, """3""": 2}[groups[-2]]
snake_case_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
snake_case_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
snake_case_ = prefix + resnet_block
snake_case_ = re_encoder_block_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_encoder_block_proj_out.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_encoder_block_proj_out.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
snake_case_ = re_encoder_block_proj_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_decoder_block_conv_out.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] ) - 2
snake_case_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
snake_case_ = re_decoder_block_conv_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_decoder_block_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_decoder_block_resnet.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] ) - 2
snake_case_ = {"""1""": 1, """3""": 2}[groups[-2]]
snake_case_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
snake_case_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
snake_case_ = prefix + resnet_block
snake_case_ = re_decoder_block_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_decoder_block_proj_in.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_decoder_block_proj_in.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
snake_case_ = re_decoder_block_proj_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_prior_cond_conv_out.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[1] ) * 2 + int(groups[2] ) - 2
snake_case_ = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
snake_case_ = re_prior_cond_conv_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_prior_cond_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_prior_cond_resnet.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[1] ) * 2 + int(groups[2] ) - 2
snake_case_ = {"""1""": 1, """3""": 2}[groups[-2]]
snake_case_ = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
snake_case_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
snake_case_ = prefix + resnet_block
snake_case_ = re_prior_cond_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_prior_cond_proj_in.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_prior_cond_proj_in.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
snake_case_ = re_prior_cond_proj_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# keep original key
else:
snake_case_ = original_key
snake_case_ = replace_key(_SCREAMING_SNAKE_CASE )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
# handle missmatched shape
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
snake_case_ = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
snake_case_ = original_key
snake_case_ = original_key
snake_case_ = value
return new_dict
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Optional[int]:
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
snake_case_ = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_SCREAMING_SNAKE_CASE )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_SCREAMING_SNAKE_CASE )
open(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , """wb""" ).write(r.content )
snake_case_ = MODEL_MAPPING[model_name.split("""/""" )[-1]]
snake_case_ = JukeboxConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ = JukeboxModel(_SCREAMING_SNAKE_CASE )
snake_case_ = []
snake_case_ = {}
for i, dict_name in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )["""model"""]
snake_case_ = {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
snake_case_ = old_dic[k]
elif k.endswith(""".w""" ):
snake_case_ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
snake_case_ = old_dic[k]
else:
snake_case_ = old_dic[k]
snake_case_ = """vqvae""" if i == 0 else f"""priors.{3 - i}"""
snake_case_ = fix_jukebox_keys(_SCREAMING_SNAKE_CASE , model.state_dict() , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
weight_dict.append(_SCREAMING_SNAKE_CASE )
snake_case_ = weight_dict.pop(0 )
model.vqvae.load_state_dict(_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , """w""" ) as txtfile:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
return weight_dict
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
__SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 2 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'tanreinama/GPTSAN-2.8B-spout_is_uniform': (
'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'
),
}
class __A (__a):
'''simple docstring'''
__lowercase: int = """gptsan-japanese"""
__lowercase: Dict = [
"""past_key_values""",
]
__lowercase: List[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Dict , UpperCAmelCase_ : List[str]=36_000 , UpperCAmelCase_ : Any=1_280 , UpperCAmelCase_ : List[Any]=1_024 , UpperCAmelCase_ : Union[str, Any]=8_192 , UpperCAmelCase_ : List[Any]=4_096 , UpperCAmelCase_ : List[Any]=128 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : List[str]=16 , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : str=128 , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : Optional[int]=1E-5 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Any="float32" , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : str=False , UpperCAmelCase_ : List[Any]=0.002 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=35_998 , UpperCAmelCase_ : Optional[Any]=35_995 , UpperCAmelCase_ : List[Any]=35_999 , **UpperCAmelCase_ : Optional[Any] , ) ->List[Any]:
"""simple docstring"""
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = d_model
snake_case_ = d_ff
snake_case_ = d_ext
snake_case_ = d_spout
snake_case_ = num_switch_layers
snake_case_ = num_ext_layers
snake_case_ = num_switch_layers + num_ext_layers
snake_case_ = num_heads
snake_case_ = num_experts
snake_case_ = expert_capacity
snake_case_ = dropout_rate
snake_case_ = layer_norm_epsilon
snake_case_ = router_bias
snake_case_ = router_jitter_noise
snake_case_ = router_dtype
snake_case_ = router_ignore_padding_tokens
snake_case_ = output_hidden_states
snake_case_ = output_attentions
snake_case_ = initializer_factor
snake_case_ = output_router_logits
snake_case_ = use_cache
super().__init__(
separator_token_id=A__ , pad_token_id=A__ , eos_token_id=A__ , **A__ , )
| 721 |
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
__SCREAMING_SNAKE_CASE : Union[str, Any] = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
__SCREAMING_SNAKE_CASE : Dict = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
__SCREAMING_SNAKE_CASE : Dict = 'zero2'
__SCREAMING_SNAKE_CASE : List[Any] = 'zero3'
__SCREAMING_SNAKE_CASE : int = [ZEROa, ZEROa]
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
snake_case_ = parameterized.to_safe_name("""_""".join(str(_SCREAMING_SNAKE_CASE ) for x in param.args ) )
return f"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
__SCREAMING_SNAKE_CASE : Dict = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __A (snake_case__):
'''simple docstring'''
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] ) ->Any:
"""simple docstring"""
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] ) ->List[str]:
"""simple docstring"""
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] ) ->Optional[int]:
"""simple docstring"""
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int = 10 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , ) ->List[str]:
"""simple docstring"""
snake_case_ = models[model]
snake_case_ = self.run_trainer(
stage=UpperCAmelCase_ , model_name=UpperCAmelCase_ , eval_steps=UpperCAmelCase_ , num_train_epochs=1 , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
self.do_checks(UpperCAmelCase_ )
return output_dir
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int = 10 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , ) ->List[str]:
"""simple docstring"""
snake_case_ = self.get_auto_remove_tmp_dir("""./xxx""" , after=UpperCAmelCase_ )
snake_case_ = F"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(UpperCAmelCase_ )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
if fpaa:
args.extend(["""--fp16"""] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
snake_case_ = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
snake_case_ = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
snake_case_ = self.get_launcher(UpperCAmelCase_ )
snake_case_ = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(UpperCAmelCase_ , env=self.get_env() )
return output_dir
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Any=False ) ->Tuple:
"""simple docstring"""
snake_case_ = min(2 , get_gpu_count() ) if distributed else 1
return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 2 | 0 |
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = os.path.abspath(_SCREAMING_SNAKE_CASE )
logger.info(f"""Converting TensorFlow checkpoint from {tf_path}""" )
# Load weights from TF model
snake_case_ = tf.train.list_variables(_SCREAMING_SNAKE_CASE )
snake_case_ = []
snake_case_ = []
snake_case_ = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
snake_case_ = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f"""Skipping non-model layer {full_name}""" )
continue
if "optimizer" in full_name:
logger.info(f"""Skipping optimization layer {full_name}""" )
continue
if name[0] == "model":
# ignore initial 'model'
snake_case_ = name[1:]
# figure out how many levels deep the name is
snake_case_ = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(_SCREAMING_SNAKE_CASE )
# read data
snake_case_ = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
names.append("""/""".join(_SCREAMING_SNAKE_CASE ) )
arrays.append(_SCREAMING_SNAKE_CASE )
logger.info(f"""Read a total of {len(_SCREAMING_SNAKE_CASE ):,} layers""" )
# Sanity check
if len(set(_SCREAMING_SNAKE_CASE ) ) != 1:
raise ValueError(f"""Found layer names with different depths (layer depth {list(set(_SCREAMING_SNAKE_CASE ) )})""" )
snake_case_ = list(set(_SCREAMING_SNAKE_CASE ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ = full_name.split("""/""" )
snake_case_ = model
snake_case_ = []
for i, m_name in enumerate(_SCREAMING_SNAKE_CASE ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
snake_case_ = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """embeddings""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """encoder""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """layer""" )
snake_case_ = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """pooler""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """token_type_embeddings""" )
else:
raise ValueError(f"""Unknown embedding layer with name {full_name}""" )
trace.append("""weight""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """attention""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """attention""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """output""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """attention""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """output""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """output""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """dense""" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["""output""", """LayerNorm"""] )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """output""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """intermediate""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """weight""" )
else:
logger.warning(f"""Ignored {m_name}""" )
# for certain layers reshape is necessary
snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE )
if re.match(r"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , _SCREAMING_SNAKE_CASE ) or re.match(
r"""(\S+)\.attention\.output\.dense\.weight""" , _SCREAMING_SNAKE_CASE ):
snake_case_ = array.reshape(pointer.data.shape )
if "kernel" in full_name:
snake_case_ = array.transpose()
if pointer.shape == array.shape:
snake_case_ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(
f"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
f""" {array.shape}""" )
logger.info(f"""Successfully set variable {full_name} to PyTorch layer {trace}""" )
return model
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
# Instantiate model
logger.info(f"""Loading model based on config from {config_path}...""" )
snake_case_ = BertConfig.from_json_file(_SCREAMING_SNAKE_CASE )
snake_case_ = BertModel(_SCREAMING_SNAKE_CASE )
# Load weights from checkpoint
logger.info(f"""Loading weights from checkpoint {tf_checkpoint_path}...""" )
load_tfa_weights_in_bert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save pytorch-model
logger.info(f"""Saving PyTorch model to {pytorch_dump_path}...""" )
torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 700 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case__)
class __A (snake_case__):
'''simple docstring'''
__lowercase: str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
__lowercase: ClassVar[Features] = Features({"""audio""": Audio()})
__lowercase: ClassVar[Features] = Features({"""transcription""": Value("""string""")})
__lowercase: str = "audio"
__lowercase: str = "transcription"
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Any ) ->int:
"""simple docstring"""
if self.audio_column not in features:
raise ValueError(F"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , UpperCAmelCase_ ):
raise ValueError(F"""Column {self.audio_column} is not an Audio type.""" )
snake_case_ = copy.deepcopy(self )
snake_case_ = self.input_schema.copy()
snake_case_ = features[self.audio_column]
snake_case_ = input_schema
return task_template
@property
def lowerCAmelCase ( self : List[str] ) ->Dict[str, str]:
"""simple docstring"""
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 2 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Tuple = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 701 |
"""simple docstring"""
from functools import reduce
__SCREAMING_SNAKE_CASE : Tuple = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def _a ( _SCREAMING_SNAKE_CASE = N ) -> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str(int(_SCREAMING_SNAKE_CASE ) * int(_SCREAMING_SNAKE_CASE ) ) , n[i : i + 13] ) )
for i in range(len(_SCREAMING_SNAKE_CASE ) - 12 ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 2 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : List[Any] = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 702 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class __A (snake_case__):
'''simple docstring'''
__lowercase: Any = """mctct"""
def __init__( self : Dict , UpperCAmelCase_ : List[Any]=8_065 , UpperCAmelCase_ : Tuple=1_536 , UpperCAmelCase_ : Optional[Any]=36 , UpperCAmelCase_ : int=6_144 , UpperCAmelCase_ : Dict=4 , UpperCAmelCase_ : Any=384 , UpperCAmelCase_ : List[str]=920 , UpperCAmelCase_ : Any=1E-5 , UpperCAmelCase_ : Any=0.3 , UpperCAmelCase_ : Tuple="relu" , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : Dict=0.3 , UpperCAmelCase_ : str=0.3 , UpperCAmelCase_ : Any=1 , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=0.3 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Optional[Any]=(7,) , UpperCAmelCase_ : Optional[Any]=(3,) , UpperCAmelCase_ : List[str]=80 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : List[str]="sum" , UpperCAmelCase_ : Union[str, Any]=False , **UpperCAmelCase_ : Any , ) ->Dict:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = intermediate_size
snake_case_ = num_attention_heads
snake_case_ = attention_head_dim
snake_case_ = max_position_embeddings
snake_case_ = layer_norm_eps
snake_case_ = layerdrop
snake_case_ = hidden_act
snake_case_ = initializer_range
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = pad_token_id
snake_case_ = bos_token_id
snake_case_ = eos_token_id
snake_case_ = conv_glu_dim
snake_case_ = conv_dropout
snake_case_ = num_conv_layers
snake_case_ = input_feat_per_channel
snake_case_ = input_channels
snake_case_ = conv_channels
snake_case_ = ctc_loss_reduction
snake_case_ = ctc_zero_infinity
# prevents config testing fail with exporting to json
snake_case_ = list(UpperCAmelCase_ )
snake_case_ = list(UpperCAmelCase_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
F"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
| 2 | 0 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__SCREAMING_SNAKE_CASE : Tuple = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
__SCREAMING_SNAKE_CASE : Optional[int] = importlib.util.spec_from_file_location(
'transformers',
os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
__SCREAMING_SNAKE_CASE : int = spec.loader.load_module()
__SCREAMING_SNAKE_CASE : Tuple = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__SCREAMING_SNAKE_CASE : Dict = re.compile('\[(.+?)\]\((https://huggingface\.co/.+?)\)')
__SCREAMING_SNAKE_CASE : Dict = {
'CLIPConfigMixin',
'DecisionTransformerConfigMixin',
'EncoderDecoderConfigMixin',
'RagConfigMixin',
'SpeechEncoderDecoderConfigMixin',
'VisionEncoderDecoderConfigMixin',
'VisionTextDualEncoderConfigMixin',
}
def _a ( ) -> Union[str, Any]:
snake_case_ = []
for config_class in list(CONFIG_MAPPING.values() ):
snake_case_ = False
# source code of `config_class`
snake_case_ = inspect.getsource(SCREAMING_SNAKE_CASE__ )
snake_case_ = _re_checkpoint.findall(SCREAMING_SNAKE_CASE__ )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
snake_case_ = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
snake_case_ = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
snake_case_ = True
break
snake_case_ = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
snake_case_ = """\n""".join(sorted(SCREAMING_SNAKE_CASE__ ) )
raise ValueError(f"""The following configurations don\'t contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 703 |
"""simple docstring"""
from math import factorial
def _a ( _SCREAMING_SNAKE_CASE = 100 ) -> int:
return sum(int(_SCREAMING_SNAKE_CASE ) for x in str(factorial(_SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 2 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__SCREAMING_SNAKE_CASE : List[Any] = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class __A (unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[int] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__lowercase: Any = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__lowercase: Optional[Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__lowercase: str = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict ) ->List[str]:
"""simple docstring"""
snake_case_ = ZeroShotClassificationPipeline(
model=__snake_case , tokenizer=__snake_case , candidate_labels=["""polics""", """health"""] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def lowerCAmelCase ( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] ) ->Any:
"""simple docstring"""
snake_case_ = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" )
self.assertEqual(__snake_case , {"""sequence""": ANY(__snake_case ), """labels""": [ANY(__snake_case )], """scores""": [ANY(__snake_case )]} )
# No kwarg
snake_case_ = classifier("""Who are you voting for in 2020?""" , ["""politics"""] )
self.assertEqual(__snake_case , {"""sequence""": ANY(__snake_case ), """labels""": [ANY(__snake_case )], """scores""": [ANY(__snake_case )]} )
snake_case_ = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] )
self.assertEqual(__snake_case , {"""sequence""": ANY(__snake_case ), """labels""": [ANY(__snake_case )], """scores""": [ANY(__snake_case )]} )
snake_case_ = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" )
self.assertEqual(
__snake_case , {"""sequence""": ANY(__snake_case ), """labels""": [ANY(__snake_case ), ANY(__snake_case )], """scores""": [ANY(__snake_case ), ANY(__snake_case )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
snake_case_ = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] )
self.assertEqual(
__snake_case , {"""sequence""": ANY(__snake_case ), """labels""": [ANY(__snake_case ), ANY(__snake_case )], """scores""": [ANY(__snake_case ), ANY(__snake_case )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
snake_case_ = classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" )
self.assertEqual(__snake_case , {"""sequence""": ANY(__snake_case ), """labels""": [ANY(__snake_case )], """scores""": [ANY(__snake_case )]} )
# https://github.com/huggingface/transformers/issues/13846
snake_case_ = classifier(["""I am happy"""] , ["""positive""", """negative"""] )
self.assertEqual(
__snake_case , [
{"""sequence""": ANY(__snake_case ), """labels""": [ANY(__snake_case ), ANY(__snake_case )], """scores""": [ANY(__snake_case ), ANY(__snake_case )]}
for i in range(1 )
] , )
snake_case_ = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] )
self.assertEqual(
__snake_case , [
{"""sequence""": ANY(__snake_case ), """labels""": [ANY(__snake_case ), ANY(__snake_case )], """scores""": [ANY(__snake_case ), ANY(__snake_case )]}
for i in range(2 )
] , )
with self.assertRaises(__snake_case ):
classifier("""""" , candidate_labels="""politics""" )
with self.assertRaises(__snake_case ):
classifier(__snake_case , candidate_labels="""politics""" )
with self.assertRaises(__snake_case ):
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" )
with self.assertRaises(__snake_case ):
classifier("""Who are you voting for in 2020?""" , candidate_labels=__snake_case )
with self.assertRaises(__snake_case ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , )
with self.assertRaises(__snake_case ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=__snake_case , )
self.run_entailment_id(__snake_case )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Pipeline ) ->int:
"""simple docstring"""
snake_case_ = zero_shot_classifier.model.config
snake_case_ = config.labelaid
snake_case_ = zero_shot_classifier.entailment_id
snake_case_ = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
snake_case_ = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case_ = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case_ = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
snake_case_ = original_labelaid
self.assertEqual(__snake_case , zero_shot_classifier.entailment_id )
@require_torch
def lowerCAmelCase ( self : str ) ->Tuple:
"""simple docstring"""
snake_case_ = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
def lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
snake_case_ = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(__snake_case ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.333, 0.333, 0.333],
} , )
@require_tf
def lowerCAmelCase ( self : Union[str, Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , )
snake_case_ = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(__snake_case ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
snake_case_ = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" )
snake_case_ = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(__snake_case ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
snake_case_ = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=__snake_case , )
self.assertEqual(
nested_simplify(__snake_case ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
snake_case_ = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" )
snake_case_ = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(__snake_case ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
snake_case_ = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=__snake_case , )
self.assertEqual(
nested_simplify(__snake_case ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
| 704 |
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: str = VQModel
__lowercase: Union[str, Any] = """sample"""
@property
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[str]=(32, 32) ) ->Tuple:
"""simple docstring"""
snake_case_ = 4
snake_case_ = 3
snake_case_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
return {"sample": image}
@property
def lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCAmelCase ( self : List[Any] ) ->Any:
"""simple docstring"""
return (3, 32, 32)
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 3,
}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase ( self : List[str] ) ->Dict:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ , snake_case_ = VQModel.from_pretrained("""fusing/vqgan-dummy""" , output_loading_info=UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(UpperCAmelCase_ )
snake_case_ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = VQModel.from_pretrained("""fusing/vqgan-dummy""" )
model.to(UpperCAmelCase_ ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
snake_case_ = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
snake_case_ = image.to(UpperCAmelCase_ )
with torch.no_grad():
snake_case_ = model(UpperCAmelCase_ ).sample
snake_case_ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case_ = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143] )
# fmt: on
self.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
| 2 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[int] = ProphetNetTokenizer
__lowercase: List[str] = False
def lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
super().setUp()
snake_case_ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[Any] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = """UNwant\u00E9d,running"""
snake_case_ = """unwanted, running"""
return input_text, output_text
def lowerCAmelCase ( self : int ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.tokenizer_class(self.vocab_file )
snake_case_ = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(UpperCAmelCase_ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [9, 6, 7, 12, 10, 11] )
def lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
snake_case_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
snake_case_ = BasicTokenizer(do_lower_case=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = BasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def lowerCAmelCase ( self : Any ) ->Any:
"""simple docstring"""
snake_case_ = BasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
snake_case_ = BasicTokenizer(do_lower_case=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
snake_case_ = BasicTokenizer(do_lower_case=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCAmelCase ( self : List[str] ) ->Dict:
"""simple docstring"""
snake_case_ = BasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = BasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
snake_case_ = BasicTokenizer(do_lower_case=UpperCAmelCase_ , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def lowerCAmelCase ( self : Optional[Any] ) ->int:
"""simple docstring"""
snake_case_ = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
snake_case_ = {}
for i, token in enumerate(UpperCAmelCase_ ):
snake_case_ = i
snake_case_ = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
@require_torch
def lowerCAmelCase ( self : Any ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
snake_case_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
snake_case_ = [1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102]
snake_case_ = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = list(batch.input_ids.numpy()[0] )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def lowerCAmelCase ( self : int ) ->Tuple:
"""simple docstring"""
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def lowerCAmelCase ( self : str ) ->int:
"""simple docstring"""
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
@slow
def lowerCAmelCase ( self : Tuple ) ->Dict:
"""simple docstring"""
snake_case_ = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
snake_case_ = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
| 705 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Dict = KandinskyVaaControlnetPipeline
__lowercase: str = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowercase: List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowercase: Union[str, Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__lowercase: Tuple = False
@property
def lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
return self.time_input_dim
@property
def lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
return 100
@property
def lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case_ = UNetaDConditionModel(**UpperCAmelCase_ )
return model
@property
def lowerCAmelCase ( self : Any ) ->Optional[Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
snake_case_ = self.dummy_unet
snake_case_ = self.dummy_movq
snake_case_ = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=UpperCAmelCase_ , )
snake_case_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any]=0 ) ->List[str]:
"""simple docstring"""
snake_case_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
snake_case_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCAmelCase_ )
# create hint
snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
if str(UpperCAmelCase_ ).startswith("""mps""" ):
snake_case_ = torch.manual_seed(UpperCAmelCase_ )
else:
snake_case_ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
snake_case_ = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
snake_case_ = """cpu"""
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**UpperCAmelCase_ )
snake_case_ = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) )
snake_case_ = output.images
snake_case_ = pipe(
**self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array(
[0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
snake_case_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
snake_case_ = torch.from_numpy(np.array(UpperCAmelCase_ ) ).float() / 255.0
snake_case_ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case_ = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCAmelCase_ )
snake_case_ = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
snake_case_ = pipeline.to(UpperCAmelCase_ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = """A robot, 4k photo"""
snake_case_ = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case_ , snake_case_ = pipe_prior(
UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
snake_case_ = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case_ = pipeline(
image_embeds=UpperCAmelCase_ , negative_image_embeds=UpperCAmelCase_ , hint=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=100 , output_type="""np""" , )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
| 2 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __A (_UpperCamelCase):
'''simple docstring'''
__lowercase: Tuple = "yolos"
def __init__( self : Dict , UpperCAmelCase_ : Optional[Any]=768 , UpperCAmelCase_ : List[Any]=12 , UpperCAmelCase_ : Any=12 , UpperCAmelCase_ : List[Any]=3_072 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Optional[int]=1E-12 , UpperCAmelCase_ : List[Any]=[512, 864] , UpperCAmelCase_ : List[str]=16 , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Optional[Any]=100 , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : List[str]=1 , UpperCAmelCase_ : str=5 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Union[str, Any]=0.1 , **UpperCAmelCase_ : List[str] , ) ->List[str]:
"""simple docstring"""
super().__init__(**__a )
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = qkv_bias
snake_case_ = num_detection_tokens
snake_case_ = use_mid_position_embeddings
snake_case_ = auxiliary_loss
# Hungarian matcher
snake_case_ = class_cost
snake_case_ = bbox_cost
snake_case_ = giou_cost
# Loss coefficients
snake_case_ = bbox_loss_coefficient
snake_case_ = giou_loss_coefficient
snake_case_ = eos_coefficient
class __A (_UpperCamelCase):
'''simple docstring'''
__lowercase: List[str] = version.parse("""1.11""")
@property
def lowerCAmelCase ( self : str ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase ( self : Optional[Any] ) ->float:
"""simple docstring"""
return 1E-4
@property
def lowerCAmelCase ( self : Dict ) ->int:
"""simple docstring"""
return 12
| 706 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class __A :
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase_ : list[str] ) ->List[Any]:
"""simple docstring"""
snake_case_ = []
self.adlist.append(
{"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} )
for keyword in keywords:
self.add_keyword(UpperCAmelCase_ )
self.set_fail_transitions()
def lowerCAmelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str ) ->int | None:
"""simple docstring"""
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def lowerCAmelCase ( self : int , UpperCAmelCase_ : str ) ->None:
"""simple docstring"""
snake_case_ = 0
for character in keyword:
snake_case_ = self.find_next_state(UpperCAmelCase_ , UpperCAmelCase_ )
if next_state is None:
self.adlist.append(
{
"""value""": character,
"""next_states""": [],
"""fail_state""": 0,
"""output""": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
snake_case_ = len(self.adlist ) - 1
else:
snake_case_ = next_state
self.adlist[current_state]["output"].append(UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->None:
"""simple docstring"""
snake_case_ = deque()
for node in self.adlist[0]["next_states"]:
q.append(UpperCAmelCase_ )
snake_case_ = 0
while q:
snake_case_ = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(UpperCAmelCase_ )
snake_case_ = self.adlist[r]["""fail_state"""]
while (
self.find_next_state(UpperCAmelCase_ , self.adlist[child]["""value"""] ) is None
and state != 0
):
snake_case_ = self.adlist[state]["""fail_state"""]
snake_case_ = self.find_next_state(
UpperCAmelCase_ , self.adlist[child]["""value"""] )
if self.adlist[child]["fail_state"] is None:
snake_case_ = 0
snake_case_ = (
self.adlist[child]["""output"""]
+ self.adlist[self.adlist[child]["""fail_state"""]]["""output"""]
)
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : str ) ->dict[str, list[int]]:
"""simple docstring"""
snake_case_ = {} # returns a dict with keywords and list of its occurrences
snake_case_ = 0
for i in range(len(UpperCAmelCase_ ) ):
while (
self.find_next_state(UpperCAmelCase_ , string[i] ) is None
and current_state != 0
):
snake_case_ = self.adlist[current_state]["""fail_state"""]
snake_case_ = self.find_next_state(UpperCAmelCase_ , string[i] )
if next_state is None:
snake_case_ = 0
else:
snake_case_ = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
snake_case_ = []
result[key].append(i - len(UpperCAmelCase_ ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[str] = {
'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}
class __A (UpperCAmelCase_ , UpperCAmelCase_):
'''simple docstring'''
__lowercase: Optional[int] = "convnextv2"
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Tuple=0.02 , UpperCAmelCase_ : str=1E-12 , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : List[str]=224 , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : int=None , **UpperCAmelCase_ : Optional[int] , ) ->Optional[int]:
"""simple docstring"""
super().__init__(**_snake_case )
snake_case_ = num_channels
snake_case_ = patch_size
snake_case_ = num_stages
snake_case_ = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
snake_case_ = [3, 3, 9, 3] if depths is None else depths
snake_case_ = hidden_act
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = drop_path_rate
snake_case_ = image_size
snake_case_ = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(
out_features=_snake_case , out_indices=_snake_case , stage_names=self.stage_names ) | 707 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Dict=[10, 20, 30, 40] , UpperCAmelCase_ : List[Any]=[2, 2, 3, 2] , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : int=["stage2", "stage3", "stage4"] , UpperCAmelCase_ : Optional[int]=[2, 3, 4] , UpperCAmelCase_ : List[str]=None , ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = num_channels
snake_case_ = num_stages
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = num_labels
snake_case_ = initializer_range
snake_case_ = out_features
snake_case_ = out_indices
snake_case_ = scope
def lowerCAmelCase ( self : List[str] ) ->str:
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = ConvNextVaModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] ) ->Any:
"""simple docstring"""
snake_case_ = ConvNextVaForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = ConvNextVaBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case_ = None
snake_case_ = ConvNextVaBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
def lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[Any] = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
__lowercase: Union[str, Any] = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
__lowercase: Union[str, Any] = False
__lowercase: Optional[Any] = False
__lowercase: Any = False
__lowercase: Union[str, Any] = False
__lowercase: Dict = False
def lowerCAmelCase ( self : Union[str, Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = ConvNextVaModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_with_labels()
snake_case_ = True
if model_class.__name__ in [
*get_values(UpperCAmelCase_ ),
*get_values(UpperCAmelCase_ ),
]:
continue
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.train()
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ )
snake_case_ = model(**UpperCAmelCase_ ).loss
loss.backward()
def lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_with_labels()
snake_case_ = False
snake_case_ = True
if (
model_class.__name__
in [*get_values(UpperCAmelCase_ ), *get_values(UpperCAmelCase_ )]
or not model_class.supports_gradient_checkpointing
):
continue
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.gradient_checkpointing_enable()
model.train()
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ )
snake_case_ = model(**UpperCAmelCase_ ).loss
loss.backward()
def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str ):
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
snake_case_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case_ = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase_ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@slow
def lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = ConvNextVaModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def _a ( ) -> str:
snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __A (unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
snake_case_ = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(UpperCAmelCase_ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = preprocessor(images=UpperCAmelCase_ , return_tensors="""pt""" ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase_ )
# verify the logits
snake_case_ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
snake_case_ = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 2 | 0 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __A (_lowerCAmelCase):
'''simple docstring'''
@slow
@require_torch
def lowerCAmelCase ( self : List[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
snake_case_ = BertTokenizer.from_pretrained("""bert-base-uncased""" )
snake_case_ = bertabert.config.encoder.vocab_size
snake_case_ = tokenizer.sep_token_id
snake_case_ = tokenizer.cls_token_id
snake_case_ = 128
snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
snake_case_ = train_dataset.select(range(32 ) )
snake_case_ = val_dataset.select(range(16 ) )
snake_case_ = 4
def _map_to_encoder_decoder_inputs(UpperCAmelCase_ : Any ):
# Tokenizer will automatically set [BOS] <text> [EOS]
snake_case_ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=_lowerCAmelCase , max_length=512 )
snake_case_ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=_lowerCAmelCase , max_length=128 )
snake_case_ = inputs.input_ids
snake_case_ = inputs.attention_mask
snake_case_ = outputs.input_ids
snake_case_ = outputs.input_ids.copy()
snake_case_ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
snake_case_ = outputs.attention_mask
assert all(len(_lowerCAmelCase ) == 512 for x in inputs.input_ids )
assert all(len(_lowerCAmelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCAmelCase_ : Tuple ):
snake_case_ = pred.label_ids
snake_case_ = pred.predictions
# all unnecessary tokens are removed
snake_case_ = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
snake_case_ = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
snake_case_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_lowerCAmelCase ) )] ) / len(_lowerCAmelCase )
return {"accuracy": accuracy}
# map train dataset
snake_case_ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_lowerCAmelCase , batch_size=_lowerCAmelCase , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
snake_case_ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_lowerCAmelCase , batch_size=_lowerCAmelCase , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = SeqaSeqTrainingArguments(
output_dir=_lowerCAmelCase , per_device_train_batch_size=_lowerCAmelCase , per_device_eval_batch_size=_lowerCAmelCase , predict_with_generate=_lowerCAmelCase , evaluation_strategy="""steps""" , do_train=_lowerCAmelCase , do_eval=_lowerCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
snake_case_ = SeqaSeqTrainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , compute_metrics=_compute_metrics , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , tokenizer=_lowerCAmelCase , )
# start training
trainer.train()
| 708 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = ['model.decoder.embed_positions.weights']
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
if "emb" in name:
snake_case_ = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
snake_case_ = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
snake_case_ = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
snake_case_ = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
snake_case_ = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
snake_case_ = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
snake_case_ = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
snake_case_ = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
snake_case_ = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
snake_case_ = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
snake_case_ = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple[Dict, Dict]:
snake_case_ = list(state_dict.keys() )
snake_case_ = {}
for key in keys:
snake_case_ = state_dict.pop(_SCREAMING_SNAKE_CASE )
snake_case_ = rename_keys(_SCREAMING_SNAKE_CASE )
if "in_proj_weight" in key:
# split fused qkv proj
snake_case_ = val[:hidden_size, :]
snake_case_ = val[hidden_size : 2 * hidden_size, :]
snake_case_ = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
snake_case_ = val
else:
snake_case_ = val
return state_dict, enc_dec_proj_state_dict
def _a ( _SCREAMING_SNAKE_CASE ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
snake_case_ = 1_024
snake_case_ = 24
snake_case_ = 16
elif checkpoint == "medium":
snake_case_ = 1_536
snake_case_ = 48
snake_case_ = 24
elif checkpoint == "large":
snake_case_ = 2_048
snake_case_ = 48
snake_case_ = 32
else:
raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
snake_case_ = MusicgenDecoderConfig(
hidden_size=_SCREAMING_SNAKE_CASE , ffn_dim=hidden_size * 4 , num_hidden_layers=_SCREAMING_SNAKE_CASE , num_attention_heads=_SCREAMING_SNAKE_CASE , )
return config
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="cpu" ) -> Tuple:
snake_case_ = MusicGen.get_pretrained(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
snake_case_ = decoder_config_from_checkpoint(_SCREAMING_SNAKE_CASE )
snake_case_ = fairseq_model.lm.state_dict()
snake_case_ , snake_case_ = rename_state_dict(
_SCREAMING_SNAKE_CASE , hidden_size=decoder_config.hidden_size )
snake_case_ = TaEncoderModel.from_pretrained("""t5-base""" )
snake_case_ = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
snake_case_ = MusicgenForCausalLM(_SCREAMING_SNAKE_CASE ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
snake_case_ , snake_case_ = decoder.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
snake_case_ = MusicgenForConditionalGeneration(text_encoder=_SCREAMING_SNAKE_CASE , audio_encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_SCREAMING_SNAKE_CASE )
# check we can do a forward pass
snake_case_ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
snake_case_ = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
snake_case_ = model(input_ids=_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
snake_case_ = AutoTokenizer.from_pretrained("""t5-base""" )
snake_case_ = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
snake_case_ = MusicgenProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
# set the appropriate bos/pad token ids
snake_case_ = 2_048
snake_case_ = 2_048
# set other default generation config params
snake_case_ = int(30 * audio_encoder.config.frame_rate )
snake_case_ = True
snake_case_ = 3.0
if pytorch_dump_folder is not None:
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if repo_id:
logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
processor.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 2 | 0 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
set_seed(770)
__SCREAMING_SNAKE_CASE : str = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.dirname(os.path.abspath(__file__))
__SCREAMING_SNAKE_CASE : str = os.path.join(os.path.expanduser('~'), '.cache')
__SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
snake_case_ = model_type
if use_small:
key += "_small"
return os.path.join(snake_case_ , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
os.makedirs(snake_case_ , exist_ok=snake_case_ )
hf_hub_download(repo_id=snake_case_ , filename=snake_case_ , local_dir=snake_case_ )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="text" ) -> Tuple:
if model_type == "text":
snake_case_ = BarkSemanticModel
snake_case_ = BarkSemanticConfig
snake_case_ = BarkSemanticGenerationConfig
elif model_type == "coarse":
snake_case_ = BarkCoarseModel
snake_case_ = BarkCoarseConfig
snake_case_ = BarkCoarseGenerationConfig
elif model_type == "fine":
snake_case_ = BarkFineModel
snake_case_ = BarkFineConfig
snake_case_ = BarkFineGenerationConfig
else:
raise NotImplementedError()
snake_case_ = f"""{model_type}_small""" if use_small else model_type
snake_case_ = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(snake_case_ ):
logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
snake_case_ = torch.load(snake_case_ , map_location=snake_case_ )
# this is a hack
snake_case_ = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
snake_case_ = model_args["""vocab_size"""]
snake_case_ = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
snake_case_ = model_args.pop("""n_head""" )
snake_case_ = model_args.pop("""n_embd""" )
snake_case_ = model_args.pop("""n_layer""" )
snake_case_ = ConfigClass(**checkpoint["""model_args"""] )
snake_case_ = ModelClass(config=snake_case_ )
snake_case_ = GenerationConfigClass()
snake_case_ = model_generation_config
snake_case_ = checkpoint["""model"""]
# fixup checkpoint
snake_case_ = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(snake_case_ ):
# replace part of the key with corresponding layer name in HF implementation
snake_case_ = k[len(snake_case_ ) :]
for old_layer_name in new_layer_name_dict:
snake_case_ = new_k.replace(snake_case_ , new_layer_name_dict[old_layer_name] )
snake_case_ = state_dict.pop(snake_case_ )
snake_case_ = set(state_dict.keys() ) - set(model.state_dict().keys() )
snake_case_ = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
snake_case_ = set(model.state_dict().keys() ) - set(state_dict.keys() )
snake_case_ = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(snake_case_ ) != 0:
raise ValueError(f"""extra keys found: {extra_keys}""" )
if len(snake_case_ ) != 0:
raise ValueError(f"""missing keys: {missing_keys}""" )
model.load_state_dict(snake_case_ , strict=snake_case_ )
snake_case_ = model.num_parameters(exclude_embeddings=snake_case_ )
snake_case_ = checkpoint["""best_val_loss"""].item()
logger.info(f"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(snake_case_ , 3 )} loss""" )
model.eval()
model.to(snake_case_ )
del checkpoint, state_dict
return model
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="text" ) -> Dict:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
snake_case_ = """cpu""" # do conversion on cpu
snake_case_ = _get_ckpt_path(snake_case_ , use_small=snake_case_ )
snake_case_ = _load_model(snake_case_ , snake_case_ , model_type=snake_case_ , use_small=snake_case_ )
# load bark initial model
snake_case_ = _bark_load_model(snake_case_ , """cpu""" , model_type=snake_case_ , use_small=snake_case_ )
if model_type == "text":
snake_case_ = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=snake_case_ ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
snake_case_ = 5
snake_case_ = 10
if model_type in ["text", "coarse"]:
snake_case_ = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
snake_case_ = bark_model(snake_case_ )[0]
snake_case_ = model(snake_case_ )
# take last logits
snake_case_ = output_new_model_total.logits[:, [-1], :]
else:
snake_case_ = 3
snake_case_ = 8
snake_case_ = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
snake_case_ = model(snake_case_ , snake_case_ )
snake_case_ = bark_model(snake_case_ , snake_case_ )
snake_case_ = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
snake_case_ = os.path.join(snake_case_ , snake_case_ )
snake_case_ = BarkSemanticConfig.from_pretrained(os.path.join(snake_case_ , """config.json""" ) )
snake_case_ = BarkCoarseConfig.from_pretrained(os.path.join(snake_case_ , """config.json""" ) )
snake_case_ = BarkFineConfig.from_pretrained(os.path.join(snake_case_ , """config.json""" ) )
snake_case_ = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
snake_case_ = BarkSemanticModel.from_pretrained(snake_case_ )
snake_case_ = BarkCoarseModel.from_pretrained(snake_case_ )
snake_case_ = BarkFineModel.from_pretrained(snake_case_ )
snake_case_ = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
snake_case_ = BarkConfig.from_sub_model_configs(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
snake_case_ = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
snake_case_ = BarkModel(snake_case_ )
snake_case_ = semantic
snake_case_ = coarseAcoustic
snake_case_ = fineAcoustic
snake_case_ = codec
snake_case_ = bark_generation_config
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
bark.save_pretrained(snake_case_ , repo_id=snake_case_ , push_to_hub=snake_case_ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
__SCREAMING_SNAKE_CASE : str = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 709 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
if index == number_of_items:
return 0
snake_case_ = 0
snake_case_ = 0
snake_case_ = knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 )
if weights[index] <= max_weight:
snake_case_ = values[index] + knapsack(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_weight - weights[index] , index + 1 )
return max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Tuple:
try:
snake_case_ = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
snake_case_ = default
else:
# KEY is set, convert it to True or False.
try:
snake_case_ = strtobool(__UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"""If set, {key} must be yes or no.""" )
return _value
__SCREAMING_SNAKE_CASE : Optional[Any] = parse_flag_from_env('RUN_SLOW', default=False)
__SCREAMING_SNAKE_CASE : Tuple = parse_flag_from_env('RUN_REMOTE', default=False)
__SCREAMING_SNAKE_CASE : List[Any] = parse_flag_from_env('RUN_LOCAL', default=True)
__SCREAMING_SNAKE_CASE : Union[str, Any] = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
__SCREAMING_SNAKE_CASE : Dict = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
__SCREAMING_SNAKE_CASE : str = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
__SCREAMING_SNAKE_CASE : Optional[int] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
__SCREAMING_SNAKE_CASE : Optional[Any] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
__SCREAMING_SNAKE_CASE : Any = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
__SCREAMING_SNAKE_CASE : str = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
__SCREAMING_SNAKE_CASE : Any = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def _a ( _SCREAMING_SNAKE_CASE ) -> Dict:
try:
import faiss # noqa
except ImportError:
snake_case_ = unittest.skip("""test requires faiss""" )(__UpperCamelCase )
return test_case
def _a ( _SCREAMING_SNAKE_CASE ) -> Tuple:
try:
import regex # noqa
except ImportError:
snake_case_ = unittest.skip("""test requires regex""" )(__UpperCamelCase )
return test_case
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
try:
import elasticsearch # noqa
except ImportError:
snake_case_ = unittest.skip("""test requires elasticsearch""" )(__UpperCamelCase )
return test_case
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
try:
import sqlalchemy # noqa
except ImportError:
snake_case_ = unittest.skip("""test requires sqlalchemy""" )(__UpperCamelCase )
return test_case
def _a ( _SCREAMING_SNAKE_CASE ) -> Dict:
if not config.TORCH_AVAILABLE:
snake_case_ = unittest.skip("""test requires PyTorch""" )(__UpperCamelCase )
return test_case
def _a ( _SCREAMING_SNAKE_CASE ) -> Tuple:
if not config.TF_AVAILABLE:
snake_case_ = unittest.skip("""test requires TensorFlow""" )(__UpperCamelCase )
return test_case
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
if not config.JAX_AVAILABLE:
snake_case_ = unittest.skip("""test requires JAX""" )(__UpperCamelCase )
return test_case
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
if not config.PIL_AVAILABLE:
snake_case_ = unittest.skip("""test requires Pillow""" )(__UpperCamelCase )
return test_case
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(__UpperCamelCase )
else:
return test_case
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(__UpperCamelCase )
else:
return test_case
def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(__UpperCamelCase )
else:
return test_case
def _a ( _SCREAMING_SNAKE_CASE ) -> Tuple:
def _require_spacy_model(_SCREAMING_SNAKE_CASE ):
try:
import spacy # noqa F401
spacy.load(__UpperCamelCase )
except ImportError:
return unittest.skip("""test requires spacy""" )(__UpperCamelCase )
except OSError:
return unittest.skip("""test requires spacy model \'{}\'""".format(__UpperCamelCase ) )(__UpperCamelCase )
else:
return test_case
return _require_spacy_model
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(__UpperCamelCase )
else:
return test_case
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(__UpperCamelCase )
else:
return test_case
def _a ( _SCREAMING_SNAKE_CASE ) -> Tuple:
if not _run_slow_tests or _run_slow_tests == 0:
snake_case_ = unittest.skip("""test is slow""" )(__UpperCamelCase )
return test_case
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
if not _run_local_tests or _run_local_tests == 0:
snake_case_ = unittest.skip("""test is local""" )(__UpperCamelCase )
return test_case
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
if not _run_packaged_tests or _run_packaged_tests == 0:
snake_case_ = unittest.skip("""test is packaged""" )(__UpperCamelCase )
return test_case
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
if not _run_remote_tests or _run_remote_tests == 0:
snake_case_ = unittest.skip("""test requires remote""" )(__UpperCamelCase )
return test_case
def _a ( *_SCREAMING_SNAKE_CASE ) -> Optional[int]:
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(__UpperCamelCase ) and name.startswith("""test""" ):
for decorator in decorators:
snake_case_ = decorator(__UpperCamelCase )
setattr(cls , __UpperCamelCase , __UpperCamelCase )
return cls
return decorate
class __A (SCREAMING_SNAKE_CASE_):
'''simple docstring'''
pass
class __A (SCREAMING_SNAKE_CASE_):
'''simple docstring'''
__lowercase: List[str] = 0
__lowercase: Union[str, Any] = 1
__lowercase: Tuple = 2
@contextmanager
def _a ( _SCREAMING_SNAKE_CASE=OfflineSimulationMode.CONNECTION_FAILS , _SCREAMING_SNAKE_CASE=1E-1_6 ) -> Dict:
snake_case_ = requests.Session().request
def timeout_request(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# Change the url to an invalid url so that the connection hangs
snake_case_ = '''https://10.255.255.1'''
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
snake_case_ = timeout
try:
return online_request(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
snake_case_ = url
snake_case_ = e.args[0]
snake_case_ = (max_retry_error.args[0].replace("""10.255.255.1""" , f"""OfflineMock[{url}]""" ),)
snake_case_ = (max_retry_error,)
raise
def raise_connection_error(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
raise requests.ConnectionError("""Offline mode is enabled.""" , request=__UpperCamelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" , __UpperCamelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" , __UpperCamelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" , __UpperCamelCase ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def _a ( *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
snake_case_ = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__UpperCamelCase , **__UpperCamelCase ) as tmp_dir:
try:
os.chdir(__UpperCamelCase )
yield
finally:
os.chdir(__UpperCamelCase )
@contextmanager
def _a ( ) -> int:
import gc
gc.collect()
snake_case_ = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _a ( ) -> Optional[int]:
import gc
gc.collect()
snake_case_ = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
return deepcopy(__UpperCamelCase ).integers(0 , 100 , 10 ).tolist() == deepcopy(__UpperCamelCase ).integers(0 , 100 , 10 ).tolist()
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
import decorator
from requests.exceptions import HTTPError
def _wrapper(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
try:
return func(*__UpperCamelCase , **__UpperCamelCase )
except HTTPError as err:
if str(__UpperCamelCase ).startswith("""500""" ) or str(__UpperCamelCase ).startswith("""502""" ):
pytest.xfail(str(__UpperCamelCase ) )
raise err
return decorator.decorator(_wrapper , __UpperCamelCase )
class __A :
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] ) ->int:
"""simple docstring"""
snake_case_ = returncode
snake_case_ = stdout
snake_case_ = stderr
async def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
while True:
snake_case_ = await stream.readline()
if line:
callback(__UpperCamelCase )
else:
break
async def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ) -> _RunOutput:
if echo:
print("""\nRunning: """ , """ """.join(__UpperCamelCase ) )
snake_case_ = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
snake_case_ = []
snake_case_ = []
def tee(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" ):
snake_case_ = line.decode("""utf-8""" ).rstrip()
sink.append(__UpperCamelCase )
if not quiet:
print(__UpperCamelCase , __UpperCamelCase , file=__UpperCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _SCREAMING_SNAKE_CASE : tee(__UpperCamelCase , __UpperCamelCase , sys.stdout , label="""stdout:""" ) ),
_read_stream(p.stderr , lambda _SCREAMING_SNAKE_CASE : tee(__UpperCamelCase , __UpperCamelCase , sys.stderr , label="""stderr:""" ) ),
] , timeout=__UpperCamelCase , )
return _RunOutput(await p.wait() , __UpperCamelCase , __UpperCamelCase )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=180 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True ) -> _RunOutput:
snake_case_ = asyncio.get_event_loop()
snake_case_ = loop.run_until_complete(
_stream_subprocess(__UpperCamelCase , env=__UpperCamelCase , stdin=__UpperCamelCase , timeout=__UpperCamelCase , quiet=__UpperCamelCase , echo=__UpperCamelCase ) )
snake_case_ = ''' '''.join(__UpperCamelCase )
if result.returncode > 0:
snake_case_ = '''\n'''.join(result.stderr )
raise RuntimeError(
f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
f"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"""'{cmd_str}' produced no output.""" )
return result
def _a ( ) -> Optional[int]:
snake_case_ = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
snake_case_ = re.sub(r"""^gw""" , """""" , __UpperCamelCase , 0 , re.M )
return int(__UpperCamelCase )
def _a ( ) -> List[Any]:
snake_case_ = 29_500
snake_case_ = pytest_xdist_worker_id()
return port + uniq_delta
| 710 |
"""simple docstring"""
from math import factorial
def _a ( _SCREAMING_SNAKE_CASE = 20 ) -> int:
snake_case_ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
snake_case_ = n // 2
return int(factorial(_SCREAMING_SNAKE_CASE ) / (factorial(_SCREAMING_SNAKE_CASE ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
__SCREAMING_SNAKE_CASE : Optional[int] = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 2 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__SCREAMING_SNAKE_CASE : str = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
snake_case_ = []
for num in range(len(lowerCamelCase_ ) ):
snake_case_ = 0
while 2 * i * i <= odd_composites[num]:
snake_case_ = odd_composites[num] - 2 * i * i
if is_prime(lowerCamelCase_ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(lowerCamelCase_ ) == n:
return list_nums
return []
def _a ( ) -> str:
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 711 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def _a ( _SCREAMING_SNAKE_CASE = 8 ) -> str:
snake_case_ = ascii_letters + digits + punctuation
return "".join(secrets.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
# Password Generator = full boot with random_number, random_letters, and
# random_character FUNCTIONS
# Put your code here...
i -= len(_SCREAMING_SNAKE_CASE )
snake_case_ = i // 3
snake_case_ = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
snake_case_ = (
chars_incl
+ random(_SCREAMING_SNAKE_CASE , quotient + remainder )
+ random(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
+ random(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
)
snake_case_ = list(_SCREAMING_SNAKE_CASE )
shuffle(_SCREAMING_SNAKE_CASE )
return "".join(_SCREAMING_SNAKE_CASE )
# random is a generalised function for letters, characters and numbers
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
return "".join(secrets.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 8 ) -> bool:
if len(_SCREAMING_SNAKE_CASE ) < min_length:
# Your Password must be at least 8 characters long
return False
snake_case_ = any(char in ascii_uppercase for char in password )
snake_case_ = any(char in ascii_lowercase for char in password )
snake_case_ = any(char in digits for char in password )
snake_case_ = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def _a ( ) -> str:
snake_case_ = int(input("""Please indicate the max length of your password: """ ).strip() )
snake_case_ = input(
"""Please indicate the characters that must be in your password: """ ).strip()
print("""Password generated:""" , password_generator(_SCREAMING_SNAKE_CASE ) )
print(
"""Alternative Password generated:""" , alternative_password_generator(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , )
print("""[If you are thinking of using this passsword, You better save it.]""" )
if __name__ == "__main__":
main()
| 2 | 0 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
return number | (1 << position)
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
return number & ~(1 << position)
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
return number ^ (1 << position)
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
return ((number >> position) & 1) == 1
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class __A (unittest.TestCase):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple=7 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=18 , UpperCAmelCase_ : Optional[Any]=30 , UpperCAmelCase_ : str=400 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Optional[Any]=True , ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = size if size is not None else {"""height""": 18, """width""": 18}
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = num_channels
snake_case_ = image_size
snake_case_ = min_resolution
snake_case_ = max_resolution
snake_case_ = do_resize
snake_case_ = size
snake_case_ = do_normalize
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: List[Any] = ImageGPTImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : Optional[int] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = ImageGPTImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Tuple ) ->List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , """clusters""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_normalize""" ) )
def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
snake_case_ = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase_ , obj[key] ) )
else:
self.assertEqual(obj[key] , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ = os.path.join(UpperCAmelCase_ , """image_processor.json""" )
image_processor_first.to_json_file(UpperCAmelCase_ )
snake_case_ = self.image_processing_class.from_json_file(UpperCAmelCase_ ).to_dict()
snake_case_ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(UpperCAmelCase_ )
snake_case_ = self.image_processing_class.from_pretrained(UpperCAmelCase_ ).to_dict()
snake_case_ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , UpperCAmelCase_ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def lowerCAmelCase ( self : List[Any] ) ->Tuple:
"""simple docstring"""
pass
def _a ( ) -> str:
snake_case_ = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
snake_case_ = Image.open(dataset[4]["""file"""] )
snake_case_ = Image.open(dataset[5]["""file"""] )
snake_case_ = [imagea, imagea]
return images
@require_vision
@require_torch
class __A (unittest.TestCase):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
snake_case_ = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
snake_case_ = prepare_images()
# test non-batched
snake_case_ = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_024) )
snake_case_ = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , UpperCAmelCase_ )
# test batched
snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_024) )
snake_case_ = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , UpperCAmelCase_ )
| 2 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __A (__lowercase , __lowercase):
'''simple docstring'''
@register_to_config
def __init__( self : List[Any] , UpperCAmelCase_ : Optional[int] = 768 , ) ->Tuple:
"""simple docstring"""
super().__init__()
snake_case_ = nn.Parameter(torch.zeros(1 , __A ) )
snake_case_ = nn.Parameter(torch.ones(1 , __A ) )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Union[str, Any] = None , UpperCAmelCase_ : Tuple = None , ) ->int:
"""simple docstring"""
snake_case_ = nn.Parameter(self.mean.to(__A ).to(__A ) )
snake_case_ = nn.Parameter(self.std.to(__A ).to(__A ) )
return self
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : List[Any] ) ->str:
"""simple docstring"""
snake_case_ = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Any ) ->List[Any]:
"""simple docstring"""
snake_case_ = (embeds * self.std) + self.mean
return embeds
| 713 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __A :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any]=13 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[str]=99 , UpperCAmelCase_ : Dict=24 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Optional[Any]=6 , UpperCAmelCase_ : int=37 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Any=512 , UpperCAmelCase_ : str=16 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Any=1_000 , ) ->Tuple:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = scope
snake_case_ = range_bbox
def lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ = bbox[i, j, 3]
snake_case_ = bbox[i, j, 1]
snake_case_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ = bbox[i, j, 2]
snake_case_ = bbox[i, j, 0]
snake_case_ = t
snake_case_ = None
if self.use_input_mask:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] , ) ->str:
"""simple docstring"""
snake_case_ = LiltModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , ) ->Dict:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = LiltForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , ) ->Dict:
"""simple docstring"""
snake_case_ = LiltForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) = config_and_inputs
snake_case_ = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class __A (snake_case__ , snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[int] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowercase: Optional[Any] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase: Union[str, Any] = False
__lowercase: List[str] = False
def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ) ->Optional[int]:
"""simple docstring"""
return True
def lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = LiltModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : List[str] ) ->int:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = LiltModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@require_torch
@slow
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(UpperCAmelCase_ )
snake_case_ = torch.tensor([[1, 2]] , device=UpperCAmelCase_ )
snake_case_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=UpperCAmelCase_ )
# forward pass
with torch.no_grad():
snake_case_ = model(input_ids=UpperCAmelCase_ , bbox=UpperCAmelCase_ )
snake_case_ = torch.Size([1, 2, 768] )
snake_case_ = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=UpperCAmelCase_ , )
self.assertTrue(outputs.last_hidden_state.shape , UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , UpperCAmelCase_ , atol=1E-3 ) )
| 2 | 0 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase__)
class __A (UpperCamelCase__):
'''simple docstring'''
def __init__( self : Union[str, Any] , **UpperCAmelCase_ : Dict ) ->Tuple:
"""simple docstring"""
super().__init__(**__A )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[int] , UpperCAmelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCAmelCase_ : Any ) ->List[str]:
"""simple docstring"""
return super().__call__(__A , **__A )
def lowerCAmelCase ( self : Optional[int] , **UpperCAmelCase_ : List[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = {}
if "candidate_labels" in kwargs:
snake_case_ = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
snake_case_ = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def lowerCAmelCase ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Any="This is a photo of {}." ) ->List[Any]:
"""simple docstring"""
snake_case_ = load_image(__A )
snake_case_ = self.image_processor(images=[image] , return_tensors=self.framework )
snake_case_ = candidate_labels
snake_case_ = [hypothesis_template.format(__A ) for x in candidate_labels]
snake_case_ = self.tokenizer(__A , return_tensors=self.framework , padding=__A )
snake_case_ = [text_inputs]
return inputs
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = model_inputs.pop("""candidate_labels""" )
snake_case_ = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , __A ):
snake_case_ = text_inputs[0]
else:
# Batching case.
snake_case_ = text_inputs[0][0]
snake_case_ = self.model(**__A , **__A )
snake_case_ = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : str ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = model_outputs.pop("""candidate_labels""" )
snake_case_ = model_outputs["""logits"""][0]
if self.framework == "pt":
snake_case_ = logits.softmax(dim=-1 ).squeeze(-1 )
snake_case_ = probs.tolist()
if not isinstance(__A , __A ):
snake_case_ = [scores]
elif self.framework == "tf":
snake_case_ = stable_softmax(__A , axis=-1 )
snake_case_ = probs.numpy().tolist()
else:
raise ValueError(F"""Unsupported framework: {self.framework}""" )
snake_case_ = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(__A , __A ) , key=lambda UpperCAmelCase_ : -x[0] )
]
return result
| 714 |
"""simple docstring"""
from __future__ import annotations
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[int]:
snake_case_ = 0
snake_case_ = len(_SCREAMING_SNAKE_CASE ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
snake_case_ = i + 1
else:
snake_case_ = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 2 | 0 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
snake_case_ = [False] * len(_UpperCAmelCase )
snake_case_ = [-1] * len(_UpperCAmelCase )
def dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ = True
snake_case_ = c
for u in graph[v]:
if not visited[u]:
dfs(_UpperCAmelCase , 1 - c )
for i in range(len(_UpperCAmelCase ) ):
if not visited[i]:
dfs(_UpperCAmelCase , 0 )
for i in range(len(_UpperCAmelCase ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
__SCREAMING_SNAKE_CASE : int = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 2 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Tuple ) ->Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
snake_case_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
sd_pipe.set_scheduler("""sample_euler""" )
snake_case_ = 'A painting of a squirrel eating a burger'
snake_case_ = torch.manual_seed(0 )
snake_case_ = sd_pipe([prompt] , generator=__a , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
snake_case_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
sd_pipe.set_scheduler("""sample_euler""" )
snake_case_ = 'A painting of a squirrel eating a burger'
snake_case_ = torch.manual_seed(0 )
snake_case_ = sd_pipe([prompt] , generator=__a , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def lowerCAmelCase ( self : int ) ->List[Any]:
"""simple docstring"""
snake_case_ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
snake_case_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
sd_pipe.set_scheduler("""sample_dpmpp_2m""" )
snake_case_ = 'A painting of a squirrel eating a burger'
snake_case_ = torch.manual_seed(0 )
snake_case_ = sd_pipe(
[prompt] , generator=__a , guidance_scale=7.5 , num_inference_steps=15 , output_type="""np""" , use_karras_sigmas=__a , )
snake_case_ = output.images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 716 |
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = 'Input must be a string of 8 numbers plus letter'
__SCREAMING_SNAKE_CASE : Dict = 'TRWAGMYFPDXBNJZSQVHLCKE'
def _a ( _SCREAMING_SNAKE_CASE ) -> bool:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ = f"""Expected string as input, found {type(_SCREAMING_SNAKE_CASE ).__name__}"""
raise TypeError(_SCREAMING_SNAKE_CASE )
snake_case_ = spanish_id.replace("""-""" , """""" ).upper()
if len(_SCREAMING_SNAKE_CASE ) != 9:
raise ValueError(_SCREAMING_SNAKE_CASE )
try:
snake_case_ = int(spanish_id_clean[0:8] )
snake_case_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(_SCREAMING_SNAKE_CASE ) from ex
if letter.isdigit():
raise ValueError(_SCREAMING_SNAKE_CASE )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class __A :
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : int = 6 ) ->None:
"""simple docstring"""
snake_case_ = None
snake_case_ = None
self.create_linked_list(__A )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int ) ->None:
"""simple docstring"""
snake_case_ = Node()
snake_case_ = current_node
snake_case_ = current_node
snake_case_ = current_node
for _ in range(1 , __A ):
snake_case_ = Node()
snake_case_ = current_node
snake_case_ = previous_node
snake_case_ = current_node
snake_case_ = self.front
snake_case_ = previous_node
def lowerCAmelCase ( self : int ) ->bool:
"""simple docstring"""
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def lowerCAmelCase ( self : Dict ) ->Any | None:
"""simple docstring"""
self.check_can_perform_operation()
return self.front.data if self.front else None
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Any ) ->None:
"""simple docstring"""
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
snake_case_ = self.rear.next
if self.rear:
snake_case_ = data
def lowerCAmelCase ( self : List[str] ) ->Any:
"""simple docstring"""
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
snake_case_ = self.front.data
snake_case_ = None
return data
snake_case_ = self.front
snake_case_ = old_front.next
snake_case_ = old_front.data
snake_case_ = None
return data
def lowerCAmelCase ( self : Any ) ->None:
"""simple docstring"""
if self.is_empty():
raise Exception("""Empty Queue""" )
def lowerCAmelCase ( self : List[str] ) ->None:
"""simple docstring"""
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class __A :
'''simple docstring'''
def __init__( self : Dict ) ->None:
"""simple docstring"""
snake_case_ = None
snake_case_ = None
snake_case_ = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = {'vocab_file': 'spiece.model'}
__SCREAMING_SNAKE_CASE : List[str] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
__SCREAMING_SNAKE_CASE : List[str] = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
__SCREAMING_SNAKE_CASE : int = '▁'
class __A (snake_case__):
'''simple docstring'''
__lowercase: Optional[Any] = VOCAB_FILES_NAMES
__lowercase: Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__lowercase: Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[Any]="[CLS]" , UpperCAmelCase_ : Any="[SEP]" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : str="[SEP]" , UpperCAmelCase_ : Optional[Any]="<pad>" , UpperCAmelCase_ : Optional[int]="[CLS]" , UpperCAmelCase_ : int="[MASK]" , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Union[str, Any] , ) ->None:
"""simple docstring"""
snake_case_ = (
AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ , normalized=UpperCAmelCase_ )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
else mask_token
)
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase_ , remove_space=UpperCAmelCase_ , keep_accents=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
snake_case_ = do_lower_case
snake_case_ = remove_space
snake_case_ = keep_accents
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase_ )
@property
def lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
return len(self.sp_model )
def lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ) ->List[str]:
"""simple docstring"""
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self : Tuple , UpperCAmelCase_ : Optional[int] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Any ) ->str:
"""simple docstring"""
if self.remove_space:
snake_case_ = """ """.join(inputs.strip().split() )
else:
snake_case_ = inputs
snake_case_ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
snake_case_ = unicodedata.normalize("""NFKD""" , UpperCAmelCase_ )
snake_case_ = """""".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase_ )] )
if self.do_lower_case:
snake_case_ = outputs.lower()
return outputs
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : str ) ->List[str]:
"""simple docstring"""
snake_case_ = self.preprocess_text(UpperCAmelCase_ )
snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
snake_case_ = []
for piece in pieces:
if len(UpperCAmelCase_ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
snake_case_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase_ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
snake_case_ = cur_pieces[1:]
else:
snake_case_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCAmelCase_ )
else:
new_pieces.append(UpperCAmelCase_ )
return new_pieces
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Optional[int] ) ->Dict:
"""simple docstring"""
return self.sp_model.PieceToId(UpperCAmelCase_ )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCAmelCase_ )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Dict ) ->Any:
"""simple docstring"""
snake_case_ = []
snake_case_ = """"""
snake_case_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase_ ) + token
snake_case_ = True
snake_case_ = []
else:
current_sub_tokens.append(UpperCAmelCase_ )
snake_case_ = False
out_string += self.sp_model.decode(UpperCAmelCase_ )
return out_string.strip()
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is not None:
return [1] + ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1]
return [1] + ([0] * len(UpperCAmelCase_ )) + [1]
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case_ = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , """wb""" ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
| 2 | 0 |
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
__SCREAMING_SNAKE_CASE : Any = '\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n'
__SCREAMING_SNAKE_CASE : int = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
__SCREAMING_SNAKE_CASE : List[Any] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __A (datasets.Metric):
'''simple docstring'''
def lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/ROUGE_(metric)""",
"""https://github.com/google-research/google-research/tree/master/rouge""",
] , )
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[str]=False ) ->Optional[int]:
"""simple docstring"""
if rouge_types is None:
snake_case_ = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""]
snake_case_ = rouge_scorer.RougeScorer(rouge_types=_lowercase , use_stemmer=_lowercase )
if use_aggregator:
snake_case_ = scoring.BootstrapAggregator()
else:
snake_case_ = []
for ref, pred in zip(_lowercase , _lowercase ):
snake_case_ = scorer.score(_lowercase , _lowercase )
if use_aggregator:
aggregator.add_scores(_lowercase )
else:
scores.append(_lowercase )
if use_aggregator:
snake_case_ = aggregator.aggregate()
else:
snake_case_ = {}
for key in scores[0]:
snake_case_ = [score[key] for score in scores]
return result
| 718 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError("""The given input must be positive""" )
# get the generated string sequence
snake_case_ = gray_code_sequence_string(_SCREAMING_SNAKE_CASE )
#
# convert them to integers
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
snake_case_ = int(sequence[i] , 2 )
return sequence
def _a ( _SCREAMING_SNAKE_CASE ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
snake_case_ = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
snake_case_ = gray_code_sequence_string(bit_count - 1 )
snake_case_ = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
snake_case_ = """0""" + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
snake_case_ = """1""" + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
__SCREAMING_SNAKE_CASE : Tuple = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A (datasets.BuilderConfig):
'''simple docstring'''
__lowercase: Optional[datasets.Features] = None
__lowercase: str = "utf-8"
__lowercase: Optional[str] = None
__lowercase: Optional[str] = None
__lowercase: bool = True # deprecated
__lowercase: Optional[int] = None # deprecated
__lowercase: int = 10 << 20 # 10MB
__lowercase: Optional[bool] = None
class __A (datasets.ArrowBasedBuilder):
'''simple docstring'''
__lowercase: List[str] = JsonConfig
def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
snake_case_ = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
snake_case_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(A__ , (str, list, tuple) ):
snake_case_ = data_files
if isinstance(A__ , A__ ):
snake_case_ = [files]
snake_case_ = [dl_manager.iter_files(A__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
snake_case_ = []
for split_name, files in data_files.items():
if isinstance(A__ , A__ ):
snake_case_ = [files]
snake_case_ = [dl_manager.iter_files(A__ ) for file in files]
splits.append(datasets.SplitGenerator(name=A__ , gen_kwargs={"""files""": files} ) )
return splits
def lowerCAmelCase ( self : int , UpperCAmelCase_ : Dict ) ->Optional[int]:
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
snake_case_ = self.config.features.arrow_schema.field(A__ ).type
snake_case_ = pa_table.append_column(A__ , pa.array([None] * len(A__ ) , type=A__ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
snake_case_ = table_cast(A__ , self.config.features.arrow_schema )
return pa_table
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Tuple ) ->Any:
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(A__ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
snake_case_ = json.load(A__ )
# We keep only the field we are interested in
snake_case_ = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(A__ , (list, tuple) ):
snake_case_ = set().union(*[row.keys() for row in dataset] )
snake_case_ = {col: [row.get(A__ ) for row in dataset] for col in keys}
else:
snake_case_ = dataset
snake_case_ = pa.Table.from_pydict(A__ )
yield file_idx, self._cast_table(A__ )
# If the file has one json object per line
else:
with open(A__ , """rb""" ) as f:
snake_case_ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
snake_case_ = max(self.config.chunksize // 32 , 16 << 10 )
snake_case_ = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
snake_case_ = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(A__ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
snake_case_ = batch.decode(self.config.encoding , errors=A__ ).encode("""utf-8""" )
try:
while True:
try:
snake_case_ = paj.read_json(
io.BytesIO(A__ ) , read_options=paj.ReadOptions(block_size=A__ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(A__ , pa.ArrowInvalid )
and "straddling" not in str(A__ )
or block_size > len(A__ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(A__ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
snake_case_ = json.load(A__ )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(A__ , A__ ): # list is the only sequence type supported in JSON
try:
snake_case_ = set().union(*[row.keys() for row in dataset] )
snake_case_ = {col: [row.get(A__ ) for row in dataset] for col in keys}
snake_case_ = pa.Table.from_pydict(A__ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(A__ )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(A__ )
batch_idx += 1
| 719 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 2 | 0 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = False, False, False
@dataclass
class __A :
'''simple docstring'''
__lowercase: Optional[int] = None
__lowercase: bool = True
__lowercase: bool = True
__lowercase: Optional[str] = None
# Automatically constructed
__lowercase: ClassVar[str] = "dict"
__lowercase: ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()})
__lowercase: str = field(default="""Audio""" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE)
def __call__( self : str ) ->Optional[int]:
"""simple docstring"""
return self.pa_type
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : int ) ->int:
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install \'soundfile\'.""" ) from err
if isinstance(_a , _a ):
return {"bytes": None, "path": value}
elif isinstance(_a , _a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
snake_case_ = BytesIO()
sf.write(_a , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a \'sampling_rate\' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
snake_case_ = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 32_767
else:
snake_case_ = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 32_767
snake_case_ = BytesIO(bytes() )
sf.write(_a , _a , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
F"""An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.""" )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any = None ) ->Dict:
"""simple docstring"""
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
snake_case_ , snake_case_ = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(F"""An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install \'librosa\' and \'soundfile\'.""" ) from err
snake_case_ = xsplitext(_a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
snake_case_ = token_per_repo_id or {}
snake_case_ = path.split("""::""" )[-1]
try:
snake_case_ = string_to_dict(_a , config.HUB_DATASETS_URL )["""repo_id"""]
snake_case_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
snake_case_ = None
with xopen(_a , """rb""" , use_auth_token=_a ) as f:
snake_case_ , snake_case_ = sf.read(_a )
else:
snake_case_ , snake_case_ = sf.read(_a )
snake_case_ = array.T
if self.mono:
snake_case_ = librosa.to_mono(_a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
snake_case_ = librosa.resample(_a , orig_sr=_a , target_sr=self.sampling_rate )
snake_case_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : List[Any] ) ->int:
"""simple docstring"""
if pa.types.is_string(storage.type ):
snake_case_ = pa.array([None] * len(_a ) , type=pa.binary() )
snake_case_ = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
snake_case_ = pa.array([None] * len(_a ) , type=pa.string() )
snake_case_ = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
snake_case_ = pa.array([Audio().encode_example(_a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
snake_case_ = storage.field("""bytes""" )
else:
snake_case_ = pa.array([None] * len(_a ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
snake_case_ = storage.field("""path""" )
else:
snake_case_ = pa.array([None] * len(_a ) , type=pa.string() )
snake_case_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(_a , self.pa_type )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Tuple ) ->Any:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(UpperCAmelCase_ : str ):
with xopen(_a , """rb""" ) as f:
snake_case_ = f.read()
return bytes_
snake_case_ = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
snake_case_ = pa.array(
[os.path.basename(_a ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
snake_case_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(_a , self.pa_type )
| 720 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = 'https://openaipublic.azureedge.net/jukebox/models/'
__SCREAMING_SNAKE_CASE : List[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.1.bias""" , """.conv1d_1.bias""" )
elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.1.weight""" , """.conv1d_1.weight""" )
elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.3.bias""" , """.conv1d_2.bias""" )
elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.3.weight""" , """.conv1d_2.weight""" )
if "conditioner_blocks.0." in key:
snake_case_ = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" )
if "prime_prior" in key:
snake_case_ = key.replace("""prime_prior""" , """encoder""" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
snake_case_ = key.replace(""".emb.""" , """.""" )
if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""" , """.codebook""" )
if "y_emb." in key:
return key.replace("""y_emb.""" , """metadata_embedding.""" )
if "x_emb.emb." in key:
snake_case_ = key.replace("""0.x_emb.emb""" , """embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""" , """.layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""" , """_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" , """encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""" , """encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""" , """fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""" , """embed_tokens""" )
return key
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = {}
import re
snake_case_ = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
snake_case_ = re.compile(
r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
snake_case_ = re.compile(
r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
snake_case_ = re.compile(
r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_encoder_block_conv_in.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] )
snake_case_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
snake_case_ = re_encoder_block_conv_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_encoder_block_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_encoder_block_resnet.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] )
snake_case_ = {"""1""": 1, """3""": 2}[groups[-2]]
snake_case_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
snake_case_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
snake_case_ = prefix + resnet_block
snake_case_ = re_encoder_block_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_encoder_block_proj_out.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_encoder_block_proj_out.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
snake_case_ = re_encoder_block_proj_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_decoder_block_conv_out.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] ) - 2
snake_case_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
snake_case_ = re_decoder_block_conv_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_decoder_block_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_decoder_block_resnet.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] ) - 2
snake_case_ = {"""1""": 1, """3""": 2}[groups[-2]]
snake_case_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
snake_case_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
snake_case_ = prefix + resnet_block
snake_case_ = re_decoder_block_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_decoder_block_proj_in.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_decoder_block_proj_in.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
snake_case_ = re_decoder_block_proj_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_prior_cond_conv_out.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[1] ) * 2 + int(groups[2] ) - 2
snake_case_ = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
snake_case_ = re_prior_cond_conv_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_prior_cond_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_prior_cond_resnet.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[1] ) * 2 + int(groups[2] ) - 2
snake_case_ = {"""1""": 1, """3""": 2}[groups[-2]]
snake_case_ = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
snake_case_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
snake_case_ = prefix + resnet_block
snake_case_ = re_prior_cond_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_prior_cond_proj_in.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_prior_cond_proj_in.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
snake_case_ = re_prior_cond_proj_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# keep original key
else:
snake_case_ = original_key
snake_case_ = replace_key(_SCREAMING_SNAKE_CASE )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
# handle missmatched shape
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
snake_case_ = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
snake_case_ = original_key
snake_case_ = original_key
snake_case_ = value
return new_dict
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Optional[int]:
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
snake_case_ = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_SCREAMING_SNAKE_CASE )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_SCREAMING_SNAKE_CASE )
open(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , """wb""" ).write(r.content )
snake_case_ = MODEL_MAPPING[model_name.split("""/""" )[-1]]
snake_case_ = JukeboxConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ = JukeboxModel(_SCREAMING_SNAKE_CASE )
snake_case_ = []
snake_case_ = {}
for i, dict_name in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )["""model"""]
snake_case_ = {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
snake_case_ = old_dic[k]
elif k.endswith(""".w""" ):
snake_case_ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
snake_case_ = old_dic[k]
else:
snake_case_ = old_dic[k]
snake_case_ = """vqvae""" if i == 0 else f"""priors.{3 - i}"""
snake_case_ = fix_jukebox_keys(_SCREAMING_SNAKE_CASE , model.state_dict() , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
weight_dict.append(_SCREAMING_SNAKE_CASE )
snake_case_ = weight_dict.pop(0 )
model.vqvae.load_state_dict(_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , """w""" ) as txtfile:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
return weight_dict
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
__SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 2 | 0 |
"""simple docstring"""
import os
import sys
import unittest
__SCREAMING_SNAKE_CASE : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__SCREAMING_SNAKE_CASE : str = os.path.join(git_repo_path, 'src', 'transformers')
__SCREAMING_SNAKE_CASE : Tuple = """
{0} = None
"""
__SCREAMING_SNAKE_CASE : List[str] = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
"""
__SCREAMING_SNAKE_CASE : Optional[Any] = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : int ) ->Any:
"""simple docstring"""
snake_case_ = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
self.assertIsNone(UpperCamelCase__ )
snake_case_ = find_backend(""" if not is_tokenizers_available():""" )
self.assertEqual(UpperCamelCase__ , """tokenizers""" )
snake_case_ = find_backend(""" if not is_tensorflow_text_available():""" )
self.assertEqual(UpperCamelCase__ , """tensorflow_text""" )
snake_case_ = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
self.assertEqual(UpperCamelCase__ , """sentencepiece_and_tokenizers""" )
snake_case_ = find_backend(
""" if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
self.assertEqual(UpperCamelCase__ , """sentencepiece_and_tensorflow_text""" )
snake_case_ = find_backend(
""" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
self.assertEqual(UpperCamelCase__ , """sentencepiece_and_tokenizers_and_vision""" )
def lowerCAmelCase ( self : Tuple ) ->List[Any]:
"""simple docstring"""
snake_case_ = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""" , UpperCamelCase__ )
self.assertIn("""tensorflow_text""" , UpperCamelCase__ )
self.assertIn("""sentencepiece_and_tokenizers""" , UpperCamelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertModel""" , objects["""tf"""] )
self.assertIn("""FlaxBertModel""" , objects["""flax"""] )
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] )
self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] )
def lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
snake_case_ = create_dummy_object("""CONSTANT""" , """\'torch\'""" )
self.assertEqual(UpperCamelCase__ , """\nCONSTANT = None\n""" )
snake_case_ = create_dummy_object("""function""" , """\'torch\'""" )
self.assertEqual(
UpperCamelCase__ , """\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n""" )
snake_case_ = """
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
"""
snake_case_ = create_dummy_object("""FakeClass""" , """\'torch\'""" )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
"""
snake_case_ = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""] , UpperCamelCase__ )
| 721 |
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
__SCREAMING_SNAKE_CASE : Union[str, Any] = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
__SCREAMING_SNAKE_CASE : Dict = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
__SCREAMING_SNAKE_CASE : Dict = 'zero2'
__SCREAMING_SNAKE_CASE : List[Any] = 'zero3'
__SCREAMING_SNAKE_CASE : int = [ZEROa, ZEROa]
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
snake_case_ = parameterized.to_safe_name("""_""".join(str(_SCREAMING_SNAKE_CASE ) for x in param.args ) )
return f"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
__SCREAMING_SNAKE_CASE : Dict = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __A (snake_case__):
'''simple docstring'''
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] ) ->Any:
"""simple docstring"""
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] ) ->List[str]:
"""simple docstring"""
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] ) ->Optional[int]:
"""simple docstring"""
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int = 10 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , ) ->List[str]:
"""simple docstring"""
snake_case_ = models[model]
snake_case_ = self.run_trainer(
stage=UpperCAmelCase_ , model_name=UpperCAmelCase_ , eval_steps=UpperCAmelCase_ , num_train_epochs=1 , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
self.do_checks(UpperCAmelCase_ )
return output_dir
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int = 10 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , ) ->List[str]:
"""simple docstring"""
snake_case_ = self.get_auto_remove_tmp_dir("""./xxx""" , after=UpperCAmelCase_ )
snake_case_ = F"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(UpperCAmelCase_ )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
if fpaa:
args.extend(["""--fp16"""] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
snake_case_ = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
snake_case_ = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
snake_case_ = self.get_launcher(UpperCAmelCase_ )
snake_case_ = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(UpperCAmelCase_ , env=self.get_env() )
return output_dir
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Any=False ) ->Tuple:
"""simple docstring"""
snake_case_ = min(2 , get_gpu_count() ) if distributed else 1
return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 2 | 0 |
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __A :
'''simple docstring'''
def __init__( self : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict=13 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=99 , UpperCAmelCase_ : List[str]=64 , UpperCAmelCase_ : Optional[int]=32 , UpperCAmelCase_ : Dict=5 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Dict=37 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : List[str]=0.02 , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Tuple=None , ) ->Dict:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = embedding_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
def lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Optional[int] ) ->Tuple:
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = MegatronBertModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int ) ->List[str]:
"""simple docstring"""
snake_case_ = MegatronBertForMaskedLM(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = MegatronBertForCausalLM(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str ) ->str:
"""simple docstring"""
snake_case_ = MegatronBertForNextSentencePrediction(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = MegatronBertForPreTraining(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , next_sentence_label=UpperCAmelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int ) ->Tuple:
"""simple docstring"""
snake_case_ = MegatronBertForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] ) ->Tuple:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = MegatronBertForSequenceClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = MegatronBertForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] ) ->int:
"""simple docstring"""
snake_case_ = self.num_choices
snake_case_ = MegatronBertForMultipleChoice(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : Any ) ->List[str]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) = config_and_inputs
snake_case_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __A (snake_case_ , snake_case_ , unittest.TestCase):
'''simple docstring'''
__lowercase: int = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
__lowercase: Any = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase: Dict = True
# test_resize_embeddings = False
__lowercase: str = False
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int]=False ) ->str:
"""simple docstring"""
snake_case_ = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ )
if return_labels:
if model_class in get_values(UpperCAmelCase_ ):
snake_case_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase_ )
snake_case_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ )
return inputs_dict
def lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = MegatronBertModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*UpperCAmelCase_ )
def lowerCAmelCase ( self : str ) ->int:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*UpperCAmelCase_ )
def lowerCAmelCase ( self : List[Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->str:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*UpperCAmelCase_ )
def lowerCAmelCase ( self : List[str] ) ->int:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict ) ->int:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*UpperCAmelCase_ )
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
return torch.tensor(
_SCREAMING_SNAKE_CASE , dtype=torch.long , device=_SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE : Optional[int] = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __A (unittest.TestCase):
'''simple docstring'''
@slow
@unittest.skip("""Model is not available.""" )
def lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
snake_case_ = """nvidia/megatron-bert-uncased-345m"""
if "MYDIR" in os.environ:
snake_case_ = os.path.join(os.environ["""MYDIR"""] , UpperCAmelCase_ )
snake_case_ = MegatronBertModel.from_pretrained(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.half()
snake_case_ = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
with torch.no_grad():
snake_case_ = model(UpperCAmelCase_ )[0]
snake_case_ = torch.Size((1, 9, 1_024) )
self.assertEqual(output.shape , UpperCAmelCase_ )
snake_case_ = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728]
for ii in range(3 ):
for jj in range(3 ):
snake_case_ = output[0, ii, jj]
snake_case_ = expected[3 * ii + jj]
snake_case_ = """ii={} jj={} a={} b={}""".format(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
self.assertTrue(math.isclose(UpperCAmelCase_ , UpperCAmelCase_ , rel_tol=UpperCAmelCase_ , abs_tol=UpperCAmelCase_ ) , msg=UpperCAmelCase_ )
| 700 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case__)
class __A (snake_case__):
'''simple docstring'''
__lowercase: str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
__lowercase: ClassVar[Features] = Features({"""audio""": Audio()})
__lowercase: ClassVar[Features] = Features({"""transcription""": Value("""string""")})
__lowercase: str = "audio"
__lowercase: str = "transcription"
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Any ) ->int:
"""simple docstring"""
if self.audio_column not in features:
raise ValueError(F"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , UpperCAmelCase_ ):
raise ValueError(F"""Column {self.audio_column} is not an Audio type.""" )
snake_case_ = copy.deepcopy(self )
snake_case_ = self.input_schema.copy()
snake_case_ = features[self.audio_column]
snake_case_ = input_schema
return task_template
@property
def lowerCAmelCase ( self : List[str] ) ->Dict[str, str]:
"""simple docstring"""
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 2 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] = '''▁'''
__SCREAMING_SNAKE_CASE : Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
__SCREAMING_SNAKE_CASE : Any = {
'''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
__SCREAMING_SNAKE_CASE : str = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __A (_UpperCAmelCase):
'''simple docstring'''
__lowercase: Any = VOCAB_FILES_NAMES
__lowercase: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase: Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase: List[Any] = ["""input_ids""", """attention_mask"""]
__lowercase: List[Any] = []
__lowercase: int = []
def __init__( self : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str]="<s>" , UpperCAmelCase_ : str="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : str="<s>" , UpperCAmelCase_ : Optional[int]="<unk>" , UpperCAmelCase_ : List[str]="<pad>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : int=False , **UpperCAmelCase_ : Union[str, Any] , ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
snake_case_ = legacy_behaviour
super().__init__(
bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , tokenizer_file=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=lowercase__ , **lowercase__ , )
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase__ ) )
snake_case_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case_ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case_ = 1
snake_case_ = len(self.sp_model )
snake_case_ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowercase__ )
}
snake_case_ = {v: k for k, v in self.lang_code_to_id.items()}
snake_case_ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
snake_case_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
snake_case_ = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
snake_case_ = src_lang if src_lang is not None else "eng_Latn"
snake_case_ = self.lang_code_to_id[self._src_lang]
snake_case_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : int ) ->List[str]:
"""simple docstring"""
snake_case_ = self.__dict__.copy()
snake_case_ = None
snake_case_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCAmelCase ( self : Tuple ) ->Union[str, Any]:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[Any] ) ->str:
"""simple docstring"""
snake_case_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict = None , UpperCAmelCase_ : Optional[Any] = False ) ->Any:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__ , token_ids_a=lowercase__ , already_has_special_tokens=lowercase__ )
snake_case_ = [1] * len(self.prefix_tokens )
snake_case_ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowercase__ )) + ([0] * len(lowercase__ )) + suffix_ones
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any = None ) ->int:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict = None ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[str] ) ->int:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
snake_case_ = src_lang
snake_case_ = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ )
snake_case_ = self.convert_tokens_to_ids(lowercase__ )
snake_case_ = tgt_lang_id
return inputs
def lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
snake_case_ = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Optional[int] ) ->List[str]:
"""simple docstring"""
return self.sp_model.encode(lowercase__ , out_type=lowercase__ )
def lowerCAmelCase ( self : int , UpperCAmelCase_ : Tuple ) ->Dict:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case_ = self.sp_model.PieceToId(lowercase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Dict ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = "".join(lowercase__ ).replace(lowercase__ , """ """ ).strip()
return out_string
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] = None ) ->Tuple:
"""simple docstring"""
if not os.path.isdir(lowercase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case_ = os.path.join(
lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase__ , """wb""" ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (out_vocab_file,)
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple = "eng_Latn" , UpperCAmelCase_ : List[str] = None , UpperCAmelCase_ : Dict = "fra_Latn" , **UpperCAmelCase_ : Optional[Any] , ) ->int:
"""simple docstring"""
snake_case_ = src_lang
snake_case_ = tgt_lang
return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )
def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[int] ) ->int:
"""simple docstring"""
snake_case_ = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
snake_case_ = []
snake_case_ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case_ = [self.cur_lang_code]
snake_case_ = [self.eos_token_id]
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Tuple ) ->Dict:
"""simple docstring"""
snake_case_ = self.lang_code_to_id[lang]
if self.legacy_behaviour:
snake_case_ = []
snake_case_ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case_ = [self.cur_lang_code]
snake_case_ = [self.eos_token_id]
| 701 |
"""simple docstring"""
from functools import reduce
__SCREAMING_SNAKE_CASE : Tuple = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def _a ( _SCREAMING_SNAKE_CASE = N ) -> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str(int(_SCREAMING_SNAKE_CASE ) * int(_SCREAMING_SNAKE_CASE ) ) , n[i : i + 13] ) )
for i in range(len(_SCREAMING_SNAKE_CASE ) - 12 ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 2 | 0 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = 1
snake_case_ = 2
while i * i <= n:
snake_case_ = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def _a ( ) -> Tuple:
snake_case_ = 1
snake_case_ = 1
while True:
i += 1
t_num += i
if count_divisors(SCREAMING_SNAKE_CASE_ ) > 500:
break
return t_num
if __name__ == "__main__":
print(solution())
| 702 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class __A (snake_case__):
'''simple docstring'''
__lowercase: Any = """mctct"""
def __init__( self : Dict , UpperCAmelCase_ : List[Any]=8_065 , UpperCAmelCase_ : Tuple=1_536 , UpperCAmelCase_ : Optional[Any]=36 , UpperCAmelCase_ : int=6_144 , UpperCAmelCase_ : Dict=4 , UpperCAmelCase_ : Any=384 , UpperCAmelCase_ : List[str]=920 , UpperCAmelCase_ : Any=1E-5 , UpperCAmelCase_ : Any=0.3 , UpperCAmelCase_ : Tuple="relu" , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : Dict=0.3 , UpperCAmelCase_ : str=0.3 , UpperCAmelCase_ : Any=1 , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=0.3 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Optional[Any]=(7,) , UpperCAmelCase_ : Optional[Any]=(3,) , UpperCAmelCase_ : List[str]=80 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : List[str]="sum" , UpperCAmelCase_ : Union[str, Any]=False , **UpperCAmelCase_ : Any , ) ->Dict:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = intermediate_size
snake_case_ = num_attention_heads
snake_case_ = attention_head_dim
snake_case_ = max_position_embeddings
snake_case_ = layer_norm_eps
snake_case_ = layerdrop
snake_case_ = hidden_act
snake_case_ = initializer_range
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = pad_token_id
snake_case_ = bos_token_id
snake_case_ = eos_token_id
snake_case_ = conv_glu_dim
snake_case_ = conv_dropout
snake_case_ = num_conv_layers
snake_case_ = input_feat_per_channel
snake_case_ = input_channels
snake_case_ = conv_channels
snake_case_ = ctc_loss_reduction
snake_case_ = ctc_zero_infinity
# prevents config testing fail with exporting to json
snake_case_ = list(UpperCAmelCase_ )
snake_case_ = list(UpperCAmelCase_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
F"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
| 2 | 0 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class __A (__A):
'''simple docstring'''
__lowercase: Any = """conditional_detr"""
__lowercase: Optional[Any] = ["""past_key_values"""]
__lowercase: Dict = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Tuple , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : Any=300 , UpperCAmelCase_ : Any=6 , UpperCAmelCase_ : Dict=2_048 , UpperCAmelCase_ : int=8 , UpperCAmelCase_ : str=6 , UpperCAmelCase_ : List[Any]=2_048 , UpperCAmelCase_ : Optional[Any]=8 , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[Any]="relu" , UpperCAmelCase_ : str=256 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Tuple=1.0 , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Optional[Any]="sine" , UpperCAmelCase_ : Any="resnet50" , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Optional[Any]=5 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Union[str, Any]=1 , UpperCAmelCase_ : Optional[int]=1 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : List[str]=0.25 , **UpperCAmelCase_ : int , ) ->Dict:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can\'t specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ = CONFIG_MAPPING['resnet'](out_features=["""stage4"""] )
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = backbone_config.get("""model_type""" )
snake_case_ = CONFIG_MAPPING[backbone_model_type]
snake_case_ = config_class.from_dict(UpperCAmelCase_ )
snake_case_ = use_timm_backbone
snake_case_ = backbone_config
snake_case_ = num_channels
snake_case_ = num_queries
snake_case_ = d_model
snake_case_ = encoder_ffn_dim
snake_case_ = encoder_layers
snake_case_ = encoder_attention_heads
snake_case_ = decoder_ffn_dim
snake_case_ = decoder_layers
snake_case_ = decoder_attention_heads
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = activation_function
snake_case_ = init_std
snake_case_ = init_xavier_std
snake_case_ = encoder_layerdrop
snake_case_ = decoder_layerdrop
snake_case_ = encoder_layers
snake_case_ = auxiliary_loss
snake_case_ = position_embedding_type
snake_case_ = backbone
snake_case_ = use_pretrained_backbone
snake_case_ = dilation
# Hungarian matcher
snake_case_ = class_cost
snake_case_ = bbox_cost
snake_case_ = giou_cost
# Loss coefficients
snake_case_ = mask_loss_coefficient
snake_case_ = dice_loss_coefficient
snake_case_ = cls_loss_coefficient
snake_case_ = bbox_loss_coefficient
snake_case_ = giou_loss_coefficient
snake_case_ = focal_alpha
super().__init__(is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ )
@property
def lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def lowerCAmelCase ( self : str ) ->int:
"""simple docstring"""
return self.d_model
def lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
snake_case_ = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output
class __A (__A):
'''simple docstring'''
__lowercase: Optional[int] = version.parse("""1.11""")
@property
def lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def lowerCAmelCase ( self : int ) ->Optional[Any]:
"""simple docstring"""
return 1E-5
@property
def lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
return 12
| 703 |
"""simple docstring"""
from math import factorial
def _a ( _SCREAMING_SNAKE_CASE = 100 ) -> int:
return sum(int(_SCREAMING_SNAKE_CASE ) for x in str(factorial(_SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 2 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : int = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Any = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 704 |
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: str = VQModel
__lowercase: Union[str, Any] = """sample"""
@property
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[str]=(32, 32) ) ->Tuple:
"""simple docstring"""
snake_case_ = 4
snake_case_ = 3
snake_case_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
return {"sample": image}
@property
def lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCAmelCase ( self : List[Any] ) ->Any:
"""simple docstring"""
return (3, 32, 32)
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 3,
}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase ( self : List[str] ) ->Dict:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ , snake_case_ = VQModel.from_pretrained("""fusing/vqgan-dummy""" , output_loading_info=UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(UpperCAmelCase_ )
snake_case_ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = VQModel.from_pretrained("""fusing/vqgan-dummy""" )
model.to(UpperCAmelCase_ ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
snake_case_ = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
snake_case_ = image.to(UpperCAmelCase_ )
with torch.no_grad():
snake_case_ = model(UpperCAmelCase_ ).sample
snake_case_ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case_ = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143] )
# fmt: on
self.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
| 2 | 0 |
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __A (__a):
'''simple docstring'''
__lowercase: Optional[Any] = '''umt5'''
__lowercase: Tuple = ['''past_key_values''']
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any=250_112 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : List[str]=64 , UpperCAmelCase_ : Union[str, Any]=1_024 , UpperCAmelCase_ : Any=8 , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Dict=6 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Union[str, Any]=128 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : str=1E-6 , UpperCAmelCase_ : Optional[int]=1.0 , UpperCAmelCase_ : Optional[Any]="gated-gelu" , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : List[str]="T5Tokenizer" , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Optional[Any]=0 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Optional[int]=0 , **UpperCAmelCase_ : str , ) ->Optional[Any]:
"""simple docstring"""
super().__init__(
is_encoder_decoder=snake_case__ , tokenizer_class=snake_case__ , tie_word_embeddings=snake_case__ , pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , **snake_case__ , )
snake_case_ = vocab_size
snake_case_ = d_model
snake_case_ = d_kv
snake_case_ = d_ff
snake_case_ = num_layers
snake_case_ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
snake_case_ = num_heads
snake_case_ = relative_attention_num_buckets
snake_case_ = relative_attention_max_distance
snake_case_ = dropout_rate
snake_case_ = layer_norm_epsilon
snake_case_ = initializer_factor
snake_case_ = feed_forward_proj
snake_case_ = use_cache
snake_case_ = self.feed_forward_proj.split("""-""" )
snake_case_ = act_info[-1]
snake_case_ = act_info[0] == """gated"""
if len(snake_case__ ) > 1 and act_info[0] != "gated" or len(snake_case__ ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
if feed_forward_proj == "gated-gelu":
snake_case_ = """gelu_new"""
@property
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
return self.d_model
@property
def lowerCAmelCase ( self : List[Any] ) ->Optional[int]:
"""simple docstring"""
return self.num_heads
@property
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
return self.num_layers
class __A (__a):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def lowerCAmelCase ( self : List[str] ) ->Tuple:
"""simple docstring"""
snake_case_ = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
snake_case_ = """past_encoder_sequence + sequence"""
snake_case_ = {0: """batch"""}
snake_case_ = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
snake_case_ = {0: """batch""", 1: """decoder_sequence"""}
snake_case_ = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(snake_case__ , direction="""inputs""" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
return 13
@property
def lowerCAmelCase ( self : List[str] ) ->Tuple:
"""simple docstring"""
return 5E-4
| 705 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Dict = KandinskyVaaControlnetPipeline
__lowercase: str = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowercase: List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowercase: Union[str, Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__lowercase: Tuple = False
@property
def lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
return self.time_input_dim
@property
def lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
return 100
@property
def lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case_ = UNetaDConditionModel(**UpperCAmelCase_ )
return model
@property
def lowerCAmelCase ( self : Any ) ->Optional[Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
snake_case_ = self.dummy_unet
snake_case_ = self.dummy_movq
snake_case_ = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=UpperCAmelCase_ , )
snake_case_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any]=0 ) ->List[str]:
"""simple docstring"""
snake_case_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
snake_case_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCAmelCase_ )
# create hint
snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
if str(UpperCAmelCase_ ).startswith("""mps""" ):
snake_case_ = torch.manual_seed(UpperCAmelCase_ )
else:
snake_case_ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
snake_case_ = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
snake_case_ = """cpu"""
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**UpperCAmelCase_ )
snake_case_ = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) )
snake_case_ = output.images
snake_case_ = pipe(
**self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array(
[0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
snake_case_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
snake_case_ = torch.from_numpy(np.array(UpperCAmelCase_ ) ).float() / 255.0
snake_case_ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case_ = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCAmelCase_ )
snake_case_ = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
snake_case_ = pipeline.to(UpperCAmelCase_ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = """A robot, 4k photo"""
snake_case_ = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case_ , snake_case_ = pipe_prior(
UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
snake_case_ = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case_ = pipeline(
image_embeds=UpperCAmelCase_ , negative_image_embeds=UpperCAmelCase_ , hint=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=100 , output_type="""np""" , )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
| 2 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : List[Any] = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 706 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class __A :
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase_ : list[str] ) ->List[Any]:
"""simple docstring"""
snake_case_ = []
self.adlist.append(
{"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} )
for keyword in keywords:
self.add_keyword(UpperCAmelCase_ )
self.set_fail_transitions()
def lowerCAmelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str ) ->int | None:
"""simple docstring"""
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def lowerCAmelCase ( self : int , UpperCAmelCase_ : str ) ->None:
"""simple docstring"""
snake_case_ = 0
for character in keyword:
snake_case_ = self.find_next_state(UpperCAmelCase_ , UpperCAmelCase_ )
if next_state is None:
self.adlist.append(
{
"""value""": character,
"""next_states""": [],
"""fail_state""": 0,
"""output""": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
snake_case_ = len(self.adlist ) - 1
else:
snake_case_ = next_state
self.adlist[current_state]["output"].append(UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->None:
"""simple docstring"""
snake_case_ = deque()
for node in self.adlist[0]["next_states"]:
q.append(UpperCAmelCase_ )
snake_case_ = 0
while q:
snake_case_ = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(UpperCAmelCase_ )
snake_case_ = self.adlist[r]["""fail_state"""]
while (
self.find_next_state(UpperCAmelCase_ , self.adlist[child]["""value"""] ) is None
and state != 0
):
snake_case_ = self.adlist[state]["""fail_state"""]
snake_case_ = self.find_next_state(
UpperCAmelCase_ , self.adlist[child]["""value"""] )
if self.adlist[child]["fail_state"] is None:
snake_case_ = 0
snake_case_ = (
self.adlist[child]["""output"""]
+ self.adlist[self.adlist[child]["""fail_state"""]]["""output"""]
)
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : str ) ->dict[str, list[int]]:
"""simple docstring"""
snake_case_ = {} # returns a dict with keywords and list of its occurrences
snake_case_ = 0
for i in range(len(UpperCAmelCase_ ) ):
while (
self.find_next_state(UpperCAmelCase_ , string[i] ) is None
and current_state != 0
):
snake_case_ = self.adlist[current_state]["""fail_state"""]
snake_case_ = self.find_next_state(UpperCAmelCase_ , string[i] )
if next_state is None:
snake_case_ = 0
else:
snake_case_ = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
snake_case_ = []
result[key].append(i - len(UpperCAmelCase_ ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
if not isinstance(_snake_case , _snake_case ):
raise ValueError("""Input must be an integer""" )
if input_num <= 0:
raise ValueError("""Input must be positive""" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 707 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Dict=[10, 20, 30, 40] , UpperCAmelCase_ : List[Any]=[2, 2, 3, 2] , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : int=["stage2", "stage3", "stage4"] , UpperCAmelCase_ : Optional[int]=[2, 3, 4] , UpperCAmelCase_ : List[str]=None , ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = num_channels
snake_case_ = num_stages
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = num_labels
snake_case_ = initializer_range
snake_case_ = out_features
snake_case_ = out_indices
snake_case_ = scope
def lowerCAmelCase ( self : List[str] ) ->str:
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = ConvNextVaModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] ) ->Any:
"""simple docstring"""
snake_case_ = ConvNextVaForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = ConvNextVaBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case_ = None
snake_case_ = ConvNextVaBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
def lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[Any] = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
__lowercase: Union[str, Any] = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
__lowercase: Union[str, Any] = False
__lowercase: Optional[Any] = False
__lowercase: Any = False
__lowercase: Union[str, Any] = False
__lowercase: Dict = False
def lowerCAmelCase ( self : Union[str, Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = ConvNextVaModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_with_labels()
snake_case_ = True
if model_class.__name__ in [
*get_values(UpperCAmelCase_ ),
*get_values(UpperCAmelCase_ ),
]:
continue
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.train()
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ )
snake_case_ = model(**UpperCAmelCase_ ).loss
loss.backward()
def lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_with_labels()
snake_case_ = False
snake_case_ = True
if (
model_class.__name__
in [*get_values(UpperCAmelCase_ ), *get_values(UpperCAmelCase_ )]
or not model_class.supports_gradient_checkpointing
):
continue
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.gradient_checkpointing_enable()
model.train()
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ )
snake_case_ = model(**UpperCAmelCase_ ).loss
loss.backward()
def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str ):
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
snake_case_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case_ = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase_ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@slow
def lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = ConvNextVaModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def _a ( ) -> str:
snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __A (unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
snake_case_ = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(UpperCAmelCase_ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = preprocessor(images=UpperCAmelCase_ , return_tensors="""pt""" ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase_ )
# verify the logits
snake_case_ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
snake_case_ = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 2 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if not scores:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , _A , _A , _A ) , minimax(depth + 1 , node_index * 2 + 1 , _A , _A , _A ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , _A , _A , _A ) , minimax(depth + 1 , node_index * 2 + 1 , _A , _A , _A ) , )
)
def _a ( ) -> None:
snake_case_ = [90, 23, 6, 33, 21, 65, 123, 34_423]
snake_case_ = math.log(len(_A ) , 2 )
print(f"""Optimal value : {minimax(0 , 0 , _A , _A , _A )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 708 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = ['model.decoder.embed_positions.weights']
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
if "emb" in name:
snake_case_ = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
snake_case_ = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
snake_case_ = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
snake_case_ = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
snake_case_ = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
snake_case_ = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
snake_case_ = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
snake_case_ = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
snake_case_ = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
snake_case_ = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
snake_case_ = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple[Dict, Dict]:
snake_case_ = list(state_dict.keys() )
snake_case_ = {}
for key in keys:
snake_case_ = state_dict.pop(_SCREAMING_SNAKE_CASE )
snake_case_ = rename_keys(_SCREAMING_SNAKE_CASE )
if "in_proj_weight" in key:
# split fused qkv proj
snake_case_ = val[:hidden_size, :]
snake_case_ = val[hidden_size : 2 * hidden_size, :]
snake_case_ = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
snake_case_ = val
else:
snake_case_ = val
return state_dict, enc_dec_proj_state_dict
def _a ( _SCREAMING_SNAKE_CASE ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
snake_case_ = 1_024
snake_case_ = 24
snake_case_ = 16
elif checkpoint == "medium":
snake_case_ = 1_536
snake_case_ = 48
snake_case_ = 24
elif checkpoint == "large":
snake_case_ = 2_048
snake_case_ = 48
snake_case_ = 32
else:
raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
snake_case_ = MusicgenDecoderConfig(
hidden_size=_SCREAMING_SNAKE_CASE , ffn_dim=hidden_size * 4 , num_hidden_layers=_SCREAMING_SNAKE_CASE , num_attention_heads=_SCREAMING_SNAKE_CASE , )
return config
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="cpu" ) -> Tuple:
snake_case_ = MusicGen.get_pretrained(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
snake_case_ = decoder_config_from_checkpoint(_SCREAMING_SNAKE_CASE )
snake_case_ = fairseq_model.lm.state_dict()
snake_case_ , snake_case_ = rename_state_dict(
_SCREAMING_SNAKE_CASE , hidden_size=decoder_config.hidden_size )
snake_case_ = TaEncoderModel.from_pretrained("""t5-base""" )
snake_case_ = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
snake_case_ = MusicgenForCausalLM(_SCREAMING_SNAKE_CASE ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
snake_case_ , snake_case_ = decoder.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
snake_case_ = MusicgenForConditionalGeneration(text_encoder=_SCREAMING_SNAKE_CASE , audio_encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_SCREAMING_SNAKE_CASE )
# check we can do a forward pass
snake_case_ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
snake_case_ = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
snake_case_ = model(input_ids=_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
snake_case_ = AutoTokenizer.from_pretrained("""t5-base""" )
snake_case_ = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
snake_case_ = MusicgenProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
# set the appropriate bos/pad token ids
snake_case_ = 2_048
snake_case_ = 2_048
# set other default generation config params
snake_case_ = int(30 * audio_encoder.config.frame_rate )
snake_case_ = True
snake_case_ = 3.0
if pytorch_dump_folder is not None:
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if repo_id:
logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
processor.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 2 | 0 |
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__)
class __A (_A):
'''simple docstring'''
def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : str=None ) ->int:
"""simple docstring"""
snake_case_ = self.layer[current_layer](UpperCamelCase__ , UpperCamelCase__ , head_mask[current_layer] )
snake_case_ = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , _A , )
class __A (_A):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCAmelCase_ : Dict ) ->Dict:
"""simple docstring"""
super().__init__(UpperCamelCase__ )
snake_case_ = BertEncoderWithPabee(UpperCamelCase__ )
self.init_weights()
snake_case_ = 0
snake_case_ = 0
snake_case_ = 0
snake_case_ = 0
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = threshold
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Dict ) ->List[Any]:
"""simple docstring"""
snake_case_ = patience
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = 0
snake_case_ = 0
def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = self.inference_layers_num / self.inference_instances_num
snake_case_ = (
F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(UpperCamelCase__ )
@add_start_docstrings_to_model_forward(UpperCamelCase__ )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Tuple=False , ) ->List[str]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
snake_case_ = input_ids.size()
elif inputs_embeds is not None:
snake_case_ = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
snake_case_ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
snake_case_ = torch.ones(UpperCamelCase__ , device=UpperCamelCase__ )
if token_type_ids is None:
snake_case_ = torch.zeros(UpperCamelCase__ , dtype=torch.long , device=UpperCamelCase__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
snake_case_ = self.get_extended_attention_mask(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
snake_case_ , snake_case_ , snake_case_ = encoder_hidden_states.size()
snake_case_ = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
snake_case_ = torch.ones(UpperCamelCase__ , device=UpperCamelCase__ )
snake_case_ = self.invert_attention_mask(UpperCamelCase__ )
else:
snake_case_ = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
snake_case_ = self.get_head_mask(UpperCamelCase__ , self.config.num_hidden_layers )
snake_case_ = self.embeddings(
input_ids=UpperCamelCase__ , position_ids=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ )
snake_case_ = embedding_output
if self.training:
snake_case_ = []
for i in range(self.config.num_hidden_layers ):
snake_case_ = self.encoder.adaptive_forward(
UpperCamelCase__ , current_layer=UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ )
snake_case_ = self.pooler(UpperCamelCase__ )
snake_case_ = output_layers[i](output_dropout(UpperCamelCase__ ) )
res.append(UpperCamelCase__ )
elif self.patience == 0: # Use all layers for inference
snake_case_ = self.encoder(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
snake_case_ = self.pooler(encoder_outputs[0] )
snake_case_ = [output_layers[self.config.num_hidden_layers - 1](UpperCamelCase__ )]
else:
snake_case_ = 0
snake_case_ = None
snake_case_ = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
snake_case_ = self.encoder.adaptive_forward(
UpperCamelCase__ , current_layer=UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ )
snake_case_ = self.pooler(UpperCamelCase__ )
snake_case_ = output_layers[i](UpperCamelCase__ )
if regression:
snake_case_ = logits.detach()
if patient_result is not None:
snake_case_ = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
snake_case_ = 0
else:
snake_case_ = logits.detach().argmax(dim=1 )
if patient_result is not None:
snake_case_ = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(UpperCamelCase__ ) ):
patient_counter += 1
else:
snake_case_ = 0
snake_case_ = logits
if patient_counter == self.patience:
break
snake_case_ = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """ , _A , )
class __A (_A):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[str] ) ->Dict:
"""simple docstring"""
super().__init__(UpperCamelCase__ )
snake_case_ = config.num_labels
snake_case_ = BertModelWithPabee(UpperCamelCase__ )
snake_case_ = nn.Dropout(config.hidden_dropout_prob )
snake_case_ = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCamelCase__ )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Any=None , ) ->Dict:
"""simple docstring"""
snake_case_ = self.bert(
input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , position_ids=UpperCamelCase__ , head_mask=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
snake_case_ = (logits[-1],)
if labels is not None:
snake_case_ = None
snake_case_ = 0
for ix, logits_item in enumerate(UpperCamelCase__ ):
if self.num_labels == 1:
# We are doing regression
snake_case_ = MSELoss()
snake_case_ = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ = CrossEntropyLoss()
snake_case_ = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
snake_case_ = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
snake_case_ = (total_loss / total_weights,) + outputs
return outputs
| 709 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
if index == number_of_items:
return 0
snake_case_ = 0
snake_case_ = 0
snake_case_ = knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 )
if weights[index] <= max_weight:
snake_case_ = values[index] + knapsack(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_weight - weights[index] , index + 1 )
return max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True ) -> List[Any]:
if model_type not in MODEL_CLASSES:
raise ValueError(f"""Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.""" )
snake_case_ = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
snake_case_ = cached_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
snake_case_ = config_class.from_json_file(_SCREAMING_SNAKE_CASE )
snake_case_ = True
snake_case_ = True
print(f"""Building TensorFlow model from configuration: {config}""" )
snake_case_ = model_class(_SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
snake_case_ = cached_file(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
snake_case_ = load_pytorch_checkpoint_in_tfa_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if compare_with_pt_model:
snake_case_ = tf_model(tf_model.dummy_inputs , training=_SCREAMING_SNAKE_CASE ) # build the network
snake_case_ = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )
snake_case_ = pt_model_class.from_pretrained(
pretrained_model_name_or_path=_SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE , state_dict=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
snake_case_ = pt_model(**pt_model.dummy_inputs )
snake_case_ = pto[0].numpy()
snake_case_ = tfo[0].numpy()
snake_case_ = np.amax(np.abs(np_pt - np_tf ) )
print(f"""Max absolute difference between models outputs {diff}""" )
assert diff <= 2E-2, f"""Error, model absolute difference is >2e-2: {diff}"""
# Save pytorch-model
print(f"""Save TensorFlow model to {tf_dump_path}""" )
tf_model.save_weights(_SCREAMING_SNAKE_CASE , save_format="""h5""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , ) -> Optional[int]:
if args_model_type is None:
snake_case_ = list(MODEL_CLASSES.keys() )
else:
snake_case_ = [args_model_type]
for j, model_type in enumerate(_SCREAMING_SNAKE_CASE , start=1 ):
print("""=""" * 100 )
print(f""" Converting model type {j}/{len(_SCREAMING_SNAKE_CASE )}: {model_type}""" )
print("""=""" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f"""Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.""" )
snake_case_ = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
snake_case_ = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
snake_case_ = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , start=1 ):
print("""-""" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f""" Skipping finetuned checkpoint {model_shortcut_name}""" )
continue
snake_case_ = model_shortcut_name
elif only_convert_finetuned_models:
print(f""" Skipping not finetuned checkpoint {model_shortcut_name}""" )
continue
print(
f""" Converting checkpoint {i}/{len(_SCREAMING_SNAKE_CASE )}: {model_shortcut_name} - model_type {model_type}""" )
print("""-""" * 100 )
if config_shortcut_name in aws_config_map:
snake_case_ = cached_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
snake_case_ = config_shortcut_name
if model_shortcut_name in aws_model_maps:
snake_case_ = cached_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
snake_case_ = model_shortcut_name
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
snake_case_ = 'converted_model'
convert_pt_checkpoint_to_tf(
model_type=_SCREAMING_SNAKE_CASE , pytorch_checkpoint_path=_SCREAMING_SNAKE_CASE , config_file=_SCREAMING_SNAKE_CASE , tf_dump_path=os.path.join(_SCREAMING_SNAKE_CASE , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=_SCREAMING_SNAKE_CASE , )
if remove_cached_files:
os.remove(_SCREAMING_SNAKE_CASE )
os.remove(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
__SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 710 |
"""simple docstring"""
from math import factorial
def _a ( _SCREAMING_SNAKE_CASE = 20 ) -> int:
snake_case_ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
snake_case_ = n // 2
return int(factorial(_SCREAMING_SNAKE_CASE ) / (factorial(_SCREAMING_SNAKE_CASE ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
__SCREAMING_SNAKE_CASE : Optional[int] = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 2 | 0 |
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def _a ( _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"""`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got """
f"""{test_file} instead.""" )
snake_case_ = components[-1]
if not test_fn.endswith("""py""" ):
raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""" )
if not test_fn.startswith("""test_modeling_""" ):
raise ValueError(
f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
snake_case_ = components[:-1] + [test_fn.replace(""".py""" , """""" )]
snake_case_ = """.""".join(__snake_case )
return test_module_path
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = get_module_path(__snake_case )
snake_case_ = importlib.import_module(__snake_case )
return test_module
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = []
snake_case_ = get_test_module(__snake_case )
for attr in dir(__snake_case ):
if attr.endswith("""ModelTester""" ):
tester_classes.append(getattr(__snake_case , __snake_case ) )
# sort with class names
return sorted(__snake_case , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def _a ( _SCREAMING_SNAKE_CASE ) -> Dict:
snake_case_ = []
snake_case_ = get_test_module(__snake_case )
for attr in dir(__snake_case ):
snake_case_ = getattr(__snake_case , __snake_case )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
snake_case_ = getattr(__snake_case , """all_model_classes""" , [] )
if len(__snake_case ) > 0:
test_classes.append(__snake_case )
# sort with class names
return sorted(__snake_case , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = get_test_classes(__snake_case )
snake_case_ = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(__snake_case , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = test_class()
if hasattr(__snake_case , """setUp""" ):
test.setUp()
snake_case_ = None
if hasattr(__snake_case , """model_tester""" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
snake_case_ = test.model_tester.__class__
return model_tester
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = get_test_classes(__snake_case )
snake_case_ = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(__snake_case )
# sort with class names
return sorted(__snake_case , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = get_test_classes_for_model(__snake_case , __snake_case )
snake_case_ = []
for test_class in test_classes:
snake_case_ = get_model_tester_from_test_class(__snake_case )
if tester_class is not None:
tester_classes.append(__snake_case )
# sort with class names
return sorted(__snake_case , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def _a ( _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ = get_test_classes(__snake_case )
snake_case_ = {test_class: get_model_tester_from_test_class(__snake_case ) for test_class in test_classes}
return test_tester_mapping
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
snake_case_ = get_model_classes(__snake_case )
snake_case_ = {
model_class: get_test_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes
}
return model_test_mapping
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
snake_case_ = get_model_classes(__snake_case )
snake_case_ = {
model_class: get_tester_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes
}
return model_to_tester_mapping
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
if isinstance(__snake_case , __snake_case ):
return o
elif isinstance(__snake_case , __snake_case ):
return o.__name__
elif isinstance(__snake_case , (list, tuple) ):
return [to_json(__snake_case ) for x in o]
elif isinstance(__snake_case , __snake_case ):
return {to_json(__snake_case ): to_json(__snake_case ) for k, v in o.items()}
else:
return o
| 711 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def _a ( _SCREAMING_SNAKE_CASE = 8 ) -> str:
snake_case_ = ascii_letters + digits + punctuation
return "".join(secrets.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
# Password Generator = full boot with random_number, random_letters, and
# random_character FUNCTIONS
# Put your code here...
i -= len(_SCREAMING_SNAKE_CASE )
snake_case_ = i // 3
snake_case_ = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
snake_case_ = (
chars_incl
+ random(_SCREAMING_SNAKE_CASE , quotient + remainder )
+ random(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
+ random(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
)
snake_case_ = list(_SCREAMING_SNAKE_CASE )
shuffle(_SCREAMING_SNAKE_CASE )
return "".join(_SCREAMING_SNAKE_CASE )
# random is a generalised function for letters, characters and numbers
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
return "".join(secrets.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 8 ) -> bool:
if len(_SCREAMING_SNAKE_CASE ) < min_length:
# Your Password must be at least 8 characters long
return False
snake_case_ = any(char in ascii_uppercase for char in password )
snake_case_ = any(char in ascii_lowercase for char in password )
snake_case_ = any(char in digits for char in password )
snake_case_ = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def _a ( ) -> str:
snake_case_ = int(input("""Please indicate the max length of your password: """ ).strip() )
snake_case_ = input(
"""Please indicate the characters that must be in your password: """ ).strip()
print("""Password generated:""" , password_generator(_SCREAMING_SNAKE_CASE ) )
print(
"""Alternative Password generated:""" , alternative_password_generator(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , )
print("""[If you are thinking of using this passsword, You better save it.]""" )
if __name__ == "__main__":
main()
| 2 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__SCREAMING_SNAKE_CASE : List[str] = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = ['ConvNextFeatureExtractor']
__SCREAMING_SNAKE_CASE : Any = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Any = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 712 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class __A (unittest.TestCase):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple=7 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=18 , UpperCAmelCase_ : Optional[Any]=30 , UpperCAmelCase_ : str=400 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Optional[Any]=True , ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = size if size is not None else {"""height""": 18, """width""": 18}
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = num_channels
snake_case_ = image_size
snake_case_ = min_resolution
snake_case_ = max_resolution
snake_case_ = do_resize
snake_case_ = size
snake_case_ = do_normalize
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: List[Any] = ImageGPTImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : Optional[int] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = ImageGPTImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Tuple ) ->List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , """clusters""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_normalize""" ) )
def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
snake_case_ = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase_ , obj[key] ) )
else:
self.assertEqual(obj[key] , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ = os.path.join(UpperCAmelCase_ , """image_processor.json""" )
image_processor_first.to_json_file(UpperCAmelCase_ )
snake_case_ = self.image_processing_class.from_json_file(UpperCAmelCase_ ).to_dict()
snake_case_ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(UpperCAmelCase_ )
snake_case_ = self.image_processing_class.from_pretrained(UpperCAmelCase_ ).to_dict()
snake_case_ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , UpperCAmelCase_ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def lowerCAmelCase ( self : List[Any] ) ->Tuple:
"""simple docstring"""
pass
def _a ( ) -> str:
snake_case_ = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
snake_case_ = Image.open(dataset[4]["""file"""] )
snake_case_ = Image.open(dataset[5]["""file"""] )
snake_case_ = [imagea, imagea]
return images
@require_vision
@require_torch
class __A (unittest.TestCase):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
snake_case_ = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
snake_case_ = prepare_images()
# test non-batched
snake_case_ = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_024) )
snake_case_ = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , UpperCAmelCase_ )
# test batched
snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_024) )
snake_case_ = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , UpperCAmelCase_ )
| 2 | 0 |
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
if len(A__ ) <= 1:
return [tuple(A__ )]
snake_case_ = []
def generate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , A__ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
snake_case_ , snake_case_ = arr[k - 1], arr[i]
else: # k is odd
snake_case_ , snake_case_ = arr[k - 1], arr[0]
generate(k - 1 , A__ )
generate(len(A__ ) , A__ )
return res
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int = input('Enter numbers separated by a comma:\n').strip()
__SCREAMING_SNAKE_CASE : Optional[Any] = [int(item) for item in user_input.split(',')]
print(heaps(arr))
| 713 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __A :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any]=13 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[str]=99 , UpperCAmelCase_ : Dict=24 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Optional[Any]=6 , UpperCAmelCase_ : int=37 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Any=512 , UpperCAmelCase_ : str=16 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Any=1_000 , ) ->Tuple:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = scope
snake_case_ = range_bbox
def lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ = bbox[i, j, 3]
snake_case_ = bbox[i, j, 1]
snake_case_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ = bbox[i, j, 2]
snake_case_ = bbox[i, j, 0]
snake_case_ = t
snake_case_ = None
if self.use_input_mask:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] , ) ->str:
"""simple docstring"""
snake_case_ = LiltModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , ) ->Dict:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = LiltForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , ) ->Dict:
"""simple docstring"""
snake_case_ = LiltForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) = config_and_inputs
snake_case_ = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class __A (snake_case__ , snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[int] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowercase: Optional[Any] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase: Union[str, Any] = False
__lowercase: List[str] = False
def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ) ->Optional[int]:
"""simple docstring"""
return True
def lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = LiltModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : List[str] ) ->int:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = LiltModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@require_torch
@slow
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(UpperCAmelCase_ )
snake_case_ = torch.tensor([[1, 2]] , device=UpperCAmelCase_ )
snake_case_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=UpperCAmelCase_ )
# forward pass
with torch.no_grad():
snake_case_ = model(input_ids=UpperCAmelCase_ , bbox=UpperCAmelCase_ )
snake_case_ = torch.Size([1, 2, 768] )
snake_case_ = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=UpperCAmelCase_ , )
self.assertTrue(outputs.last_hidden_state.shape , UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , UpperCAmelCase_ , atol=1E-3 ) )
| 2 | 0 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class __A (_UpperCamelCase):
'''simple docstring'''
__lowercase: str = """mvp"""
__lowercase: Any = ["""past_key_values"""]
__lowercase: List[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Dict , UpperCAmelCase_ : Tuple=50_267 , UpperCAmelCase_ : Any=1_024 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : List[str]=4_096 , UpperCAmelCase_ : List[str]=16 , UpperCAmelCase_ : str=12 , UpperCAmelCase_ : List[Any]=4_096 , UpperCAmelCase_ : Dict=16 , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Optional[Any]=1_024 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : str=False , UpperCAmelCase_ : List[Any]=100 , UpperCAmelCase_ : str=800 , **UpperCAmelCase_ : Tuple , ) ->int:
"""simple docstring"""
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = d_model
snake_case_ = encoder_ffn_dim
snake_case_ = encoder_layers
snake_case_ = encoder_attention_heads
snake_case_ = decoder_ffn_dim
snake_case_ = decoder_layers
snake_case_ = decoder_attention_heads
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = activation_function
snake_case_ = init_std
snake_case_ = encoder_layerdrop
snake_case_ = decoder_layerdrop
snake_case_ = classifier_dropout
snake_case_ = use_cache
snake_case_ = encoder_layers
snake_case_ = scale_embedding # scale factor will be sqrt(d_model) if True
snake_case_ = use_prompt
snake_case_ = prompt_length
snake_case_ = prompt_mid_dim
super().__init__(
pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , forced_eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , UpperCAmelCase_ ):
snake_case_ = self.bos_token_id
warnings.warn(
F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"""The config can simply be saved and uploaded again to be fixed.""" )
| 714 |
"""simple docstring"""
from __future__ import annotations
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[int]:
snake_case_ = 0
snake_case_ = len(_SCREAMING_SNAKE_CASE ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
snake_case_ = i + 1
else:
snake_case_ = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 2 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __A (snake_case__):
'''simple docstring'''
def lowerCAmelCase ( self : Tuple ) ->Tuple:
"""simple docstring"""
snake_case_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowercase_ , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(lowercase_ , """num_attention_heads""" ) )
self.parent.assertTrue(hasattr(lowercase_ , """num_encoder_blocks""" ) )
class __A :
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple=13 , UpperCAmelCase_ : List[Any]=64 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Dict=[2, 2, 2, 2] , UpperCAmelCase_ : Optional[Any]=[8, 4, 2, 1] , UpperCAmelCase_ : str=[16, 32, 64, 128] , UpperCAmelCase_ : Optional[Any]=[1, 4, 8, 16] , UpperCAmelCase_ : Tuple=[1, 2, 4, 8] , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : List[str]=None , ) ->List[Any]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = num_channels
snake_case_ = num_encoder_blocks
snake_case_ = sr_ratios
snake_case_ = depths
snake_case_ = hidden_sizes
snake_case_ = downsampling_rates
snake_case_ = num_attention_heads
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = scope
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case_ = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str ) ->List[str]:
"""simple docstring"""
snake_case_ = SegformerModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = model(lowercase_ )
snake_case_ = snake_case_ = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = SegformerForSemanticSegmentation(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = model(lowercase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
snake_case_ = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int ) ->str:
"""simple docstring"""
snake_case_ = 1
snake_case_ = SegformerForSemanticSegmentation(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(lowercase_ )
snake_case_ = model(lowercase_ , labels=lowercase_ )
self.parent.assertGreater(result.loss , 0.0 )
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Any = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
__lowercase: Union[str, Any] = (
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowercase: Optional[Any] = True
__lowercase: Optional[int] = False
__lowercase: int = False
__lowercase: Union[str, Any] = False
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = SegformerModelTester(self )
snake_case_ = SegformerConfigTester(self , config_class=lowercase_ )
def lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : str ) ->Tuple:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def lowerCAmelCase ( self : Dict ) ->List[str]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*lowercase_ )
def lowerCAmelCase ( self : Any ) ->Any:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*lowercase_ )
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def lowerCAmelCase ( self : str ) ->List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(lowercase_ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
for model_class in self.all_model_classes:
snake_case_ = True
snake_case_ = False
snake_case_ = True
snake_case_ = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
snake_case_ = outputs.attentions
snake_case_ = sum(self.model_tester.depths )
self.assertEqual(len(lowercase_ ) , lowercase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ = True
snake_case_ = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
snake_case_ = outputs.attentions
self.assertEqual(len(lowercase_ ) , lowercase_ )
# verify the first attentions (first block, first layer)
snake_case_ = (self.model_tester.image_size // 4) ** 2
snake_case_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
snake_case_ = (self.model_tester.image_size // 32) ** 2
snake_case_ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
snake_case_ = len(lowercase_ )
# Check attention is always last and order is fine
snake_case_ = True
snake_case_ = True
snake_case_ = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + 1 , len(lowercase_ ) )
snake_case_ = outputs.attentions
self.assertEqual(len(lowercase_ ) , lowercase_ )
# verify the first attentions (first block, first layer)
snake_case_ = (self.model_tester.image_size // 4) ** 2
snake_case_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def lowerCAmelCase ( self : List[Any] ) ->List[Any]:
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] ):
snake_case_ = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
snake_case_ = outputs.hidden_states
snake_case_ = self.model_tester.num_encoder_blocks
self.assertEqual(len(lowercase_ ) , lowercase_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def lowerCAmelCase ( self : Any ) ->Optional[Any]:
"""simple docstring"""
if not self.model_tester.is_training:
return
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
for model_class in self.all_model_classes:
if model_class in get_values(lowercase_ ):
continue
snake_case_ = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
snake_case_ = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
snake_case_ = model(**lowercase_ ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
pass
@slow
def lowerCAmelCase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = SegformerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def _a ( ) -> Union[str, Any]:
snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class __A (unittest.TestCase):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowercase_ , align=lowercase_ , do_random_crop=lowercase_ )
snake_case_ = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
lowercase_ )
snake_case_ = prepare_img()
snake_case_ = image_processor(images=lowercase_ , return_tensors="""pt""" )
snake_case_ = encoded_inputs.pixel_values.to(lowercase_ )
with torch.no_grad():
snake_case_ = model(lowercase_ )
snake_case_ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , lowercase_ )
snake_case_ = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowercase_ , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
snake_case_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowercase_ , align=lowercase_ , do_random_crop=lowercase_ )
snake_case_ = SegformerForSemanticSegmentation.from_pretrained(
"""nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(lowercase_ )
snake_case_ = prepare_img()
snake_case_ = image_processor(images=lowercase_ , return_tensors="""pt""" )
snake_case_ = encoded_inputs.pixel_values.to(lowercase_ )
with torch.no_grad():
snake_case_ = model(lowercase_ )
snake_case_ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , lowercase_ )
snake_case_ = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowercase_ , atol=1E-1 ) )
@slow
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
snake_case_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowercase_ , align=lowercase_ , do_random_crop=lowercase_ )
snake_case_ = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
lowercase_ )
snake_case_ = prepare_img()
snake_case_ = image_processor(images=lowercase_ , return_tensors="""pt""" )
snake_case_ = encoded_inputs.pixel_values.to(lowercase_ )
with torch.no_grad():
snake_case_ = model(lowercase_ )
snake_case_ = outputs.logits.detach().cpu()
snake_case_ = image_processor.post_process_semantic_segmentation(outputs=lowercase_ , target_sizes=[(500, 300)] )
snake_case_ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , lowercase_ )
snake_case_ = image_processor.post_process_semantic_segmentation(outputs=lowercase_ )
snake_case_ = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , lowercase_ )
| 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 2 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'Hello, World!'
__SCREAMING_SNAKE_CASE : Dict = 'en_XX'
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ = Path("""data_bin""" )
snake_case_ = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(snake_case__ ).parent ) , checkpoint_file=Path(snake_case__ ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(snake_case__ ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(snake_case__ ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(snake_case__ )
snake_case_ = xmod.model.encoder.sentence_encoder
snake_case_ = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
snake_case_ = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , snake_case__ )
snake_case_ = XmodForSequenceClassification(snake_case__ ) if classification_head else XmodForMaskedLM(snake_case__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
snake_case_ = xmod_sent_encoder.embed_tokens.weight
snake_case_ = xmod_sent_encoder.embed_positions.weight
snake_case_ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
snake_case_ = xmod_sent_encoder.layernorm_embedding.weight
snake_case_ = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
snake_case_ = model.roberta.encoder.layer[i]
snake_case_ = xmod_sent_encoder.layers[i]
# self attention
snake_case_ = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
snake_case_ = xmod_layer.self_attn.q_proj.weight
snake_case_ = xmod_layer.self_attn.q_proj.bias
snake_case_ = xmod_layer.self_attn.k_proj.weight
snake_case_ = xmod_layer.self_attn.k_proj.bias
snake_case_ = xmod_layer.self_attn.v_proj.weight
snake_case_ = xmod_layer.self_attn.v_proj.bias
# self-attention output
snake_case_ = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
snake_case_ = xmod_layer.self_attn.out_proj.weight
snake_case_ = xmod_layer.self_attn.out_proj.bias
snake_case_ = xmod_layer.self_attn_layer_norm.weight
snake_case_ = xmod_layer.self_attn_layer_norm.bias
# intermediate
snake_case_ = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
snake_case_ = xmod_layer.fca.weight
snake_case_ = xmod_layer.fca.bias
# output
snake_case_ = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
snake_case_ = xmod_layer.fca.weight
snake_case_ = xmod_layer.fca.bias
snake_case_ = xmod_layer.final_layer_norm.weight
snake_case_ = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
snake_case_ = xmod_layer.adapter_layer_norm.weight
snake_case_ = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
snake_case_ = bert_output.adapter_modules[lang_code]
snake_case_ = xmod_layer.adapter_modules[lang_code]
snake_case_ = from_adapter.fca.weight
snake_case_ = from_adapter.fca.bias
snake_case_ = from_adapter.fca.weight
snake_case_ = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
snake_case_ = xmod_sent_encoder.layer_norm.weight
snake_case_ = xmod_sent_encoder.layer_norm.bias
if classification_head:
snake_case_ = xmod.model.classification_heads["""mnli"""].dense.weight
snake_case_ = xmod.model.classification_heads["""mnli"""].dense.bias
snake_case_ = xmod.model.classification_heads["""mnli"""].out_proj.weight
snake_case_ = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
snake_case_ = xmod.model.encoder.lm_head.dense.weight
snake_case_ = xmod.model.encoder.lm_head.dense.bias
snake_case_ = xmod.model.encoder.lm_head.layer_norm.weight
snake_case_ = xmod.model.encoder.lm_head.layer_norm.bias
snake_case_ = xmod.model.encoder.lm_head.weight
snake_case_ = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
snake_case_ = xmod.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(snake_case__ )
snake_case_ = model(snake_case__ )[0]
if classification_head:
snake_case_ = xmod.model.classification_heads["""mnli"""](xmod.extract_features(snake_case__ ) )
else:
snake_case_ = xmod.model(snake_case__ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
snake_case_ = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
snake_case_ = torch.allclose(snake_case__ , snake_case__ , atol=1E-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
__SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 716 |
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = 'Input must be a string of 8 numbers plus letter'
__SCREAMING_SNAKE_CASE : Dict = 'TRWAGMYFPDXBNJZSQVHLCKE'
def _a ( _SCREAMING_SNAKE_CASE ) -> bool:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ = f"""Expected string as input, found {type(_SCREAMING_SNAKE_CASE ).__name__}"""
raise TypeError(_SCREAMING_SNAKE_CASE )
snake_case_ = spanish_id.replace("""-""" , """""" ).upper()
if len(_SCREAMING_SNAKE_CASE ) != 9:
raise ValueError(_SCREAMING_SNAKE_CASE )
try:
snake_case_ = int(spanish_id_clean[0:8] )
snake_case_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(_SCREAMING_SNAKE_CASE ) from ex
if letter.isdigit():
raise ValueError(_SCREAMING_SNAKE_CASE )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __A :
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : int=32 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : str=16 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Dict=0 , ) ->List[Any]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
snake_case_ = projection_dim
def lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
snake_case_ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple ) ->Any:
"""simple docstring"""
snake_case_ = TFDPRContextEncoder(config=UpperCAmelCase__ )
snake_case_ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
snake_case_ = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
snake_case_ = model(UpperCAmelCase__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = TFDPRQuestionEncoder(config=UpperCAmelCase__ )
snake_case_ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
snake_case_ = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
snake_case_ = model(UpperCAmelCase__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] ) ->Any:
"""simple docstring"""
snake_case_ = TFDPRReader(config=UpperCAmelCase__ )
snake_case_ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
(
snake_case_
) = config_and_inputs
snake_case_ = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Any = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
__lowercase: Optional[Any] = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
__lowercase: Union[str, Any] = False
__lowercase: int = False
__lowercase: List[str] = False
__lowercase: List[str] = False
__lowercase: str = False
def lowerCAmelCase ( self : Any ) ->Tuple:
"""simple docstring"""
snake_case_ = TFDPRModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Dict ) ->Tuple:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*UpperCAmelCase__ )
def lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*UpperCAmelCase__ )
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*UpperCAmelCase__ )
@slow
def lowerCAmelCase ( self : int ) ->Union[str, Any]:
"""simple docstring"""
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = TFDPRContextEncoder.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = TFDPRContextEncoder.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = TFDPRQuestionEncoder.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = TFDPRReader.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@require_tf
class __A (unittest.TestCase):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
snake_case_ = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" )
snake_case_ = tf.constant(
[[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
snake_case_ = model(UpperCAmelCase__ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
snake_case_ = tf.constant(
[
[
0.03_236_253,
0.12_753_335,
0.16_818_509,
0.00_279_786,
0.3_896_933,
0.24_264_945,
0.2_178_971,
-0.02_335_227,
-0.08_481_959,
-0.14_324_117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 717 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = {'vocab_file': 'spiece.model'}
__SCREAMING_SNAKE_CASE : List[str] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
__SCREAMING_SNAKE_CASE : List[str] = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
__SCREAMING_SNAKE_CASE : int = '▁'
class __A (snake_case__):
'''simple docstring'''
__lowercase: Optional[Any] = VOCAB_FILES_NAMES
__lowercase: Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__lowercase: Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[Any]="[CLS]" , UpperCAmelCase_ : Any="[SEP]" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : str="[SEP]" , UpperCAmelCase_ : Optional[Any]="<pad>" , UpperCAmelCase_ : Optional[int]="[CLS]" , UpperCAmelCase_ : int="[MASK]" , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Union[str, Any] , ) ->None:
"""simple docstring"""
snake_case_ = (
AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ , normalized=UpperCAmelCase_ )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
else mask_token
)
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase_ , remove_space=UpperCAmelCase_ , keep_accents=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
snake_case_ = do_lower_case
snake_case_ = remove_space
snake_case_ = keep_accents
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase_ )
@property
def lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
return len(self.sp_model )
def lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ) ->List[str]:
"""simple docstring"""
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self : Tuple , UpperCAmelCase_ : Optional[int] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Any ) ->str:
"""simple docstring"""
if self.remove_space:
snake_case_ = """ """.join(inputs.strip().split() )
else:
snake_case_ = inputs
snake_case_ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
snake_case_ = unicodedata.normalize("""NFKD""" , UpperCAmelCase_ )
snake_case_ = """""".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase_ )] )
if self.do_lower_case:
snake_case_ = outputs.lower()
return outputs
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : str ) ->List[str]:
"""simple docstring"""
snake_case_ = self.preprocess_text(UpperCAmelCase_ )
snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
snake_case_ = []
for piece in pieces:
if len(UpperCAmelCase_ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
snake_case_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase_ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
snake_case_ = cur_pieces[1:]
else:
snake_case_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCAmelCase_ )
else:
new_pieces.append(UpperCAmelCase_ )
return new_pieces
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Optional[int] ) ->Dict:
"""simple docstring"""
return self.sp_model.PieceToId(UpperCAmelCase_ )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCAmelCase_ )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Dict ) ->Any:
"""simple docstring"""
snake_case_ = []
snake_case_ = """"""
snake_case_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase_ ) + token
snake_case_ = True
snake_case_ = []
else:
current_sub_tokens.append(UpperCAmelCase_ )
snake_case_ = False
out_string += self.sp_model.decode(UpperCAmelCase_ )
return out_string.strip()
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is not None:
return [1] + ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1]
return [1] + ([0] * len(UpperCAmelCase_ )) + [1]
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case_ = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , """wb""" ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
| 2 | 0 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
snake_case_ = [1]
for i in range(2 , _SCREAMING_SNAKE_CASE ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
snake_case_ = []
snake_case_ = list(range(_SCREAMING_SNAKE_CASE ) )
# Find permutation
while factorials:
snake_case_ = factorials.pop()
snake_case_ = divmod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError("""The given input must be positive""" )
# get the generated string sequence
snake_case_ = gray_code_sequence_string(_SCREAMING_SNAKE_CASE )
#
# convert them to integers
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
snake_case_ = int(sequence[i] , 2 )
return sequence
def _a ( _SCREAMING_SNAKE_CASE ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
snake_case_ = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
snake_case_ = gray_code_sequence_string(bit_count - 1 )
snake_case_ = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
snake_case_ = """0""" + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
snake_case_ = """1""" + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
from __future__ import annotations
import numpy as np
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
return np.maximum(0 , __a )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 719 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 2 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger('transformers.models.encodec')
__SCREAMING_SNAKE_CASE : Optional[Any] = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
__SCREAMING_SNAKE_CASE : Tuple = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
__SCREAMING_SNAKE_CASE : Dict = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
__SCREAMING_SNAKE_CASE : Tuple = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
__SCREAMING_SNAKE_CASE : List[Any] = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
__SCREAMING_SNAKE_CASE : List[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__SCREAMING_SNAKE_CASE : Dict = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__SCREAMING_SNAKE_CASE : Any = []
__SCREAMING_SNAKE_CASE : Dict = []
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
for attribute in key.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
snake_case_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
snake_case_ = value
elif weight_type == "weight_g":
snake_case_ = value
elif weight_type == "weight_v":
snake_case_ = value
elif weight_type == "bias":
snake_case_ = value
elif weight_type == "running_mean":
snake_case_ = value
elif weight_type == "running_var":
snake_case_ = value
elif weight_type == "num_batches_tracked":
snake_case_ = value
elif weight_type == "weight_ih_l0":
snake_case_ = value
elif weight_type == "weight_hh_l0":
snake_case_ = value
elif weight_type == "bias_ih_l0":
snake_case_ = value
elif weight_type == "bias_hh_l0":
snake_case_ = value
elif weight_type == "weight_ih_l1":
snake_case_ = value
elif weight_type == "weight_hh_l1":
snake_case_ = value
elif weight_type == "bias_ih_l1":
snake_case_ = value
elif weight_type == "bias_hh_l1":
snake_case_ = value
else:
snake_case_ = value
logger.info(f"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
snake_case_ = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
snake_case_ = []
if model_name == "encodec_24khz" or "encodec_32khz":
snake_case_ = MAPPING_24K
elif model_name == "encodec_48khz":
snake_case_ = MAPPING_48K
else:
raise ValueError(f"""Unsupported model: {model_name}""" )
for name, value in orig_dict.items():
if should_ignore(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
logger.info(f"""{name} was ignored""" )
continue
snake_case_ = False
for key, mapped_key in MAPPING.items():
if "*" in key:
snake_case_ = key.split(""".*.""" )
if prefix in name and suffix in name:
snake_case_ = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
continue
snake_case_ = True
if "*" in mapped_key:
snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2]
snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
snake_case_ = "weight_g"
elif "weight_v" in name:
snake_case_ = "weight_v"
elif "weight_ih_l0" in name:
snake_case_ = "weight_ih_l0"
elif "weight_hh_l0" in name:
snake_case_ = "weight_hh_l0"
elif "bias_ih_l0" in name:
snake_case_ = "bias_ih_l0"
elif "bias_hh_l0" in name:
snake_case_ = "bias_hh_l0"
elif "weight_ih_l1" in name:
snake_case_ = "weight_ih_l1"
elif "weight_hh_l1" in name:
snake_case_ = "weight_hh_l1"
elif "bias_ih_l1" in name:
snake_case_ = "bias_ih_l1"
elif "bias_hh_l1" in name:
snake_case_ = "bias_hh_l1"
elif "bias" in name:
snake_case_ = "bias"
elif "weight" in name:
snake_case_ = "weight"
elif "running_mean" in name:
snake_case_ = "running_mean"
elif "running_var" in name:
snake_case_ = "running_var"
elif "num_batches_tracked" in name:
snake_case_ = "num_batches_tracked"
else:
snake_case_ = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(f"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[str]:
if config_path is not None:
snake_case_ = EncodecConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
snake_case_ = [8, 5, 4, 4]
snake_case_ = [2.2]
snake_case_ = 64
snake_case_ = 32_000
snake_case_ = 2_048
snake_case_ = False
snake_case_ = False
snake_case_ = False
elif model_name == "encodec_48khz":
snake_case_ = [8, 5, 4, 2]
snake_case_ = [3.0, 6.0, 12.0, 24.0]
snake_case_ = 48_000
snake_case_ = 2
snake_case_ = False
snake_case_ = "time_group_norm"
snake_case_ = True
snake_case_ = 1.0
snake_case_ = 0.01
else:
raise ValueError(f"""Unknown model name: {model_name}""" )
snake_case_ = EncodecModel(_SCREAMING_SNAKE_CASE )
snake_case_ = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ = torch.load(_SCREAMING_SNAKE_CASE )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
snake_case_ = original_checkpoint["best_state"]
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if repo_id:
print("""Pushing to the hub...""" )
feature_extractor.push_to_hub(_SCREAMING_SNAKE_CASE )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
__SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 720 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = 'https://openaipublic.azureedge.net/jukebox/models/'
__SCREAMING_SNAKE_CASE : List[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.1.bias""" , """.conv1d_1.bias""" )
elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.1.weight""" , """.conv1d_1.weight""" )
elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.3.bias""" , """.conv1d_2.bias""" )
elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
snake_case_ = key.replace(""".model.3.weight""" , """.conv1d_2.weight""" )
if "conditioner_blocks.0." in key:
snake_case_ = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" )
if "prime_prior" in key:
snake_case_ = key.replace("""prime_prior""" , """encoder""" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
snake_case_ = key.replace(""".emb.""" , """.""" )
if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""" , """.codebook""" )
if "y_emb." in key:
return key.replace("""y_emb.""" , """metadata_embedding.""" )
if "x_emb.emb." in key:
snake_case_ = key.replace("""0.x_emb.emb""" , """embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""" , """.layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""" , """_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" , """encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""" , """encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""" , """fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""" , """embed_tokens""" )
return key
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = {}
import re
snake_case_ = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
snake_case_ = re.compile(
r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
snake_case_ = re.compile(
r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
snake_case_ = re.compile(
r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
snake_case_ = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_encoder_block_conv_in.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] )
snake_case_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
snake_case_ = re_encoder_block_conv_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_encoder_block_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_encoder_block_resnet.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] )
snake_case_ = {"""1""": 1, """3""": 2}[groups[-2]]
snake_case_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
snake_case_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
snake_case_ = prefix + resnet_block
snake_case_ = re_encoder_block_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_encoder_block_proj_out.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_encoder_block_proj_out.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
snake_case_ = re_encoder_block_proj_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_decoder_block_conv_out.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] ) - 2
snake_case_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
snake_case_ = re_decoder_block_conv_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_decoder_block_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_decoder_block_resnet.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[2] ) * 2 + int(groups[3] ) - 2
snake_case_ = {"""1""": 1, """3""": 2}[groups[-2]]
snake_case_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
snake_case_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
snake_case_ = prefix + resnet_block
snake_case_ = re_decoder_block_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_decoder_block_proj_in.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_decoder_block_proj_in.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
snake_case_ = re_decoder_block_proj_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_prior_cond_conv_out.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[1] ) * 2 + int(groups[2] ) - 2
snake_case_ = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
snake_case_ = re_prior_cond_conv_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_prior_cond_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_prior_cond_resnet.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = int(groups[1] ) * 2 + int(groups[2] ) - 2
snake_case_ = {"""1""": 1, """3""": 2}[groups[-2]]
snake_case_ = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
snake_case_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
snake_case_ = prefix + resnet_block
snake_case_ = re_prior_cond_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_prior_cond_proj_in.fullmatch(_SCREAMING_SNAKE_CASE ):
snake_case_ = re_prior_cond_proj_in.match(_SCREAMING_SNAKE_CASE )
snake_case_ = regex_match.groups()
snake_case_ = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
snake_case_ = re_prior_cond_proj_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# keep original key
else:
snake_case_ = original_key
snake_case_ = replace_key(_SCREAMING_SNAKE_CASE )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
# handle missmatched shape
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
snake_case_ = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
snake_case_ = original_key
snake_case_ = original_key
snake_case_ = value
return new_dict
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Optional[int]:
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
snake_case_ = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_SCREAMING_SNAKE_CASE )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_SCREAMING_SNAKE_CASE )
open(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , """wb""" ).write(r.content )
snake_case_ = MODEL_MAPPING[model_name.split("""/""" )[-1]]
snake_case_ = JukeboxConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ = JukeboxModel(_SCREAMING_SNAKE_CASE )
snake_case_ = []
snake_case_ = {}
for i, dict_name in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )["""model"""]
snake_case_ = {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
snake_case_ = old_dic[k]
elif k.endswith(""".w""" ):
snake_case_ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
snake_case_ = old_dic[k]
else:
snake_case_ = old_dic[k]
snake_case_ = """vqvae""" if i == 0 else f"""priors.{3 - i}"""
snake_case_ = fix_jukebox_keys(_SCREAMING_SNAKE_CASE , model.state_dict() , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
weight_dict.append(_SCREAMING_SNAKE_CASE )
snake_case_ = weight_dict.pop(0 )
model.vqvae.load_state_dict(_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , """w""" ) as txtfile:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
return weight_dict
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
__SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 2 | 0 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
if principal <= 0:
raise Exception("""Principal borrowed must be > 0""" )
if rate_per_annum < 0:
raise Exception("""Rate of interest must be >= 0""" )
if years_to_repay <= 0 or not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise Exception("""Years to repay must be an integer > 0""" )
# Yearly rate is divided by 12 to get monthly rate
snake_case_ = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
snake_case_ = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
__SCREAMING_SNAKE_CASE : Union[str, Any] = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
__SCREAMING_SNAKE_CASE : Dict = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
__SCREAMING_SNAKE_CASE : Dict = 'zero2'
__SCREAMING_SNAKE_CASE : List[Any] = 'zero3'
__SCREAMING_SNAKE_CASE : int = [ZEROa, ZEROa]
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
snake_case_ = parameterized.to_safe_name("""_""".join(str(_SCREAMING_SNAKE_CASE ) for x in param.args ) )
return f"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
__SCREAMING_SNAKE_CASE : Dict = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __A (snake_case__):
'''simple docstring'''
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] ) ->Any:
"""simple docstring"""
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] ) ->List[str]:
"""simple docstring"""
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] ) ->Optional[int]:
"""simple docstring"""
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int = 10 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , ) ->List[str]:
"""simple docstring"""
snake_case_ = models[model]
snake_case_ = self.run_trainer(
stage=UpperCAmelCase_ , model_name=UpperCAmelCase_ , eval_steps=UpperCAmelCase_ , num_train_epochs=1 , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
self.do_checks(UpperCAmelCase_ )
return output_dir
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int = 10 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , ) ->List[str]:
"""simple docstring"""
snake_case_ = self.get_auto_remove_tmp_dir("""./xxx""" , after=UpperCAmelCase_ )
snake_case_ = F"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(UpperCAmelCase_ )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
if fpaa:
args.extend(["""--fp16"""] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
snake_case_ = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
snake_case_ = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
snake_case_ = self.get_launcher(UpperCAmelCase_ )
snake_case_ = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(UpperCAmelCase_ , env=self.get_env() )
return output_dir
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Any=False ) ->Tuple:
"""simple docstring"""
snake_case_ = min(2 , get_gpu_count() ) if distributed else 1
return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 2 | 0 |
"""simple docstring"""
from typing import Any
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
if not input_list:
return []
snake_case_ = [input_list.count(_lowerCAmelCase ) for value in input_list]
snake_case_ = max(_lowerCAmelCase ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(_lowerCAmelCase ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case__)
class __A (snake_case__):
'''simple docstring'''
__lowercase: str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
__lowercase: ClassVar[Features] = Features({"""audio""": Audio()})
__lowercase: ClassVar[Features] = Features({"""transcription""": Value("""string""")})
__lowercase: str = "audio"
__lowercase: str = "transcription"
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Any ) ->int:
"""simple docstring"""
if self.audio_column not in features:
raise ValueError(F"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , UpperCAmelCase_ ):
raise ValueError(F"""Column {self.audio_column} is not an Audio type.""" )
snake_case_ = copy.deepcopy(self )
snake_case_ = self.input_schema.copy()
snake_case_ = features[self.audio_column]
snake_case_ = input_schema
return task_template
@property
def lowerCAmelCase ( self : List[str] ) ->Dict[str, str]:
"""simple docstring"""
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 2 | 0 |
"""simple docstring"""
import os
def _a ( ) -> str:
snake_case_ = os.path.join(os.path.dirname(lowerCamelCase_ ) , """num.txt""" )
with open(lowerCamelCase_ ) as file_hand:
return str(sum(int(lowerCamelCase_ ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 701 |
"""simple docstring"""
from functools import reduce
__SCREAMING_SNAKE_CASE : Tuple = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def _a ( _SCREAMING_SNAKE_CASE = N ) -> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str(int(_SCREAMING_SNAKE_CASE ) * int(_SCREAMING_SNAKE_CASE ) ) , n[i : i + 13] ) )
for i in range(len(_SCREAMING_SNAKE_CASE ) - 12 ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 2 | 0 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> tuple[complex, complex]:
if a == 0:
raise ValueError("""Coefficient \'a\' must not be zero.""" )
snake_case_ = b * b - 4 * a * c
snake_case_ = (-b + sqrt(UpperCamelCase__ )) / (2 * a)
snake_case_ = (-b - sqrt(UpperCamelCase__ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def _a ( ) -> Optional[int]:
snake_case_ , snake_case_ = quadratic_roots(a=5 , b=6 , c=1 )
print(f"""The solutions are: {solutiona} and {solutiona}""" )
if __name__ == "__main__":
main()
| 702 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class __A (snake_case__):
'''simple docstring'''
__lowercase: Any = """mctct"""
def __init__( self : Dict , UpperCAmelCase_ : List[Any]=8_065 , UpperCAmelCase_ : Tuple=1_536 , UpperCAmelCase_ : Optional[Any]=36 , UpperCAmelCase_ : int=6_144 , UpperCAmelCase_ : Dict=4 , UpperCAmelCase_ : Any=384 , UpperCAmelCase_ : List[str]=920 , UpperCAmelCase_ : Any=1E-5 , UpperCAmelCase_ : Any=0.3 , UpperCAmelCase_ : Tuple="relu" , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : Dict=0.3 , UpperCAmelCase_ : str=0.3 , UpperCAmelCase_ : Any=1 , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=0.3 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Optional[Any]=(7,) , UpperCAmelCase_ : Optional[Any]=(3,) , UpperCAmelCase_ : List[str]=80 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : List[str]="sum" , UpperCAmelCase_ : Union[str, Any]=False , **UpperCAmelCase_ : Any , ) ->Dict:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = intermediate_size
snake_case_ = num_attention_heads
snake_case_ = attention_head_dim
snake_case_ = max_position_embeddings
snake_case_ = layer_norm_eps
snake_case_ = layerdrop
snake_case_ = hidden_act
snake_case_ = initializer_range
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = pad_token_id
snake_case_ = bos_token_id
snake_case_ = eos_token_id
snake_case_ = conv_glu_dim
snake_case_ = conv_dropout
snake_case_ = num_conv_layers
snake_case_ = input_feat_per_channel
snake_case_ = input_channels
snake_case_ = conv_channels
snake_case_ = ctc_loss_reduction
snake_case_ = ctc_zero_infinity
# prevents config testing fail with exporting to json
snake_case_ = list(UpperCAmelCase_ )
snake_case_ = list(UpperCAmelCase_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
F"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
| 2 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class __A :
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : List[str]=64 , UpperCAmelCase_ : Tuple=None ) ->int:
"""simple docstring"""
snake_case_ = np.random.default_rng(lowercase_ )
snake_case_ = length
snake_case_ = rng.normal(size=(length,) ).astype(np.floataa )
snake_case_ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : int ) ->Dict:
"""simple docstring"""
return self.length
def __getitem__( self : Tuple , UpperCAmelCase_ : str ) ->List[Any]:
"""simple docstring"""
return {"x": self.x[i], "y": self.y[i]}
class __A (torch.nn.Module):
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : int=False ) ->Dict:
"""simple docstring"""
super().__init__()
snake_case_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
snake_case_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
snake_case_ = True
def lowerCAmelCase ( self : int , UpperCAmelCase_ : Any=None ) ->Any:
"""simple docstring"""
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
snake_case_ = False
return x * self.a[0] + self.b[0]
class __A (torch.nn.Module):
'''simple docstring'''
def __init__( self : Tuple , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : Dict=False ) ->Dict:
"""simple docstring"""
super().__init__()
snake_case_ = torch.nn.Parameter(torch.tensor(lowercase_ ).float() )
snake_case_ = torch.nn.Parameter(torch.tensor(lowercase_ ).float() )
snake_case_ = True
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Tuple=None ) ->Union[str, Any]:
"""simple docstring"""
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
snake_case_ = False
return x * self.a + self.b
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 16 ) -> int:
from datasets import load_dataset
from transformers import AutoTokenizer
snake_case_ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
snake_case_ = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
snake_case_ = load_dataset("""csv""" , data_files=__SCREAMING_SNAKE_CASE )
snake_case_ = datasets["train"].unique("""label""" )
snake_case_ = {v: i for i, v in enumerate(__SCREAMING_SNAKE_CASE )}
def tokenize_function(_SCREAMING_SNAKE_CASE ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ = tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="""max_length""" )
if "label" in examples:
snake_case_ = [label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case_ = datasets.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(_SCREAMING_SNAKE_CASE ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(__SCREAMING_SNAKE_CASE , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
snake_case_ = DataLoader(tokenized_datasets["""train"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=2 )
snake_case_ = DataLoader(tokenized_datasets["""validation"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=1 )
return train_dataloader, eval_dataloader
| 703 |
"""simple docstring"""
from math import factorial
def _a ( _SCREAMING_SNAKE_CASE = 100 ) -> int:
return sum(int(_SCREAMING_SNAKE_CASE ) for x in str(factorial(_SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 2 | 0 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
snake_case_ = len(_SCREAMING_SNAKE_CASE )
for i in range(length - 1 ):
snake_case_ = i
for k in range(i + 1 , _SCREAMING_SNAKE_CASE ):
if collection[k] < collection[least]:
snake_case_ = k
if least != i:
snake_case_ , snake_case_ = (collection[i], collection[least])
return collection
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = input('Enter numbers separated by a comma:\n').strip()
__SCREAMING_SNAKE_CASE : Tuple = [int(item) for item in user_input.split(',')]
print(selection_sort(unsorted))
| 704 |
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: str = VQModel
__lowercase: Union[str, Any] = """sample"""
@property
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[str]=(32, 32) ) ->Tuple:
"""simple docstring"""
snake_case_ = 4
snake_case_ = 3
snake_case_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
return {"sample": image}
@property
def lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCAmelCase ( self : List[Any] ) ->Any:
"""simple docstring"""
return (3, 32, 32)
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 3,
}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase ( self : List[str] ) ->Dict:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ , snake_case_ = VQModel.from_pretrained("""fusing/vqgan-dummy""" , output_loading_info=UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(UpperCAmelCase_ )
snake_case_ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = VQModel.from_pretrained("""fusing/vqgan-dummy""" )
model.to(UpperCAmelCase_ ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
snake_case_ = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
snake_case_ = image.to(UpperCAmelCase_ )
with torch.no_grad():
snake_case_ = model(UpperCAmelCase_ ).sample
snake_case_ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case_ = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143] )
# fmt: on
self.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
| 2 | 0 |
"""simple docstring"""
from collections.abc import Sequence
def _a ( _SCREAMING_SNAKE_CASE = None ) -> int:
if nums is None or not nums:
raise ValueError("""Input sequence should not be empty""" )
snake_case_ = nums[0]
for i in range(1 , len(_lowerCamelCase ) ):
snake_case_ = nums[i]
snake_case_ = max(_lowerCamelCase , ans + num , _lowerCamelCase )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
__SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('Enter number of elements : ').strip())
__SCREAMING_SNAKE_CASE : Union[str, Any] = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array))
| 705 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Dict = KandinskyVaaControlnetPipeline
__lowercase: str = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowercase: List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowercase: Union[str, Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__lowercase: Tuple = False
@property
def lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
return self.time_input_dim
@property
def lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
return 100
@property
def lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case_ = UNetaDConditionModel(**UpperCAmelCase_ )
return model
@property
def lowerCAmelCase ( self : Any ) ->Optional[Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
snake_case_ = self.dummy_unet
snake_case_ = self.dummy_movq
snake_case_ = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=UpperCAmelCase_ , )
snake_case_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any]=0 ) ->List[str]:
"""simple docstring"""
snake_case_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
snake_case_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCAmelCase_ )
# create hint
snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
if str(UpperCAmelCase_ ).startswith("""mps""" ):
snake_case_ = torch.manual_seed(UpperCAmelCase_ )
else:
snake_case_ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
snake_case_ = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
snake_case_ = """cpu"""
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**UpperCAmelCase_ )
snake_case_ = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) )
snake_case_ = output.images
snake_case_ = pipe(
**self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array(
[0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
snake_case_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
snake_case_ = torch.from_numpy(np.array(UpperCAmelCase_ ) ).float() / 255.0
snake_case_ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case_ = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCAmelCase_ )
snake_case_ = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
snake_case_ = pipeline.to(UpperCAmelCase_ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = """A robot, 4k photo"""
snake_case_ = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case_ , snake_case_ = pipe_prior(
UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
snake_case_ = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case_ = pipeline(
image_embeds=UpperCAmelCase_ , negative_image_embeds=UpperCAmelCase_ , hint=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=100 , output_type="""np""" , )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
| 2 | 0 |
"""simple docstring"""
import os
from collections.abc import Iterator
def _a ( _SCREAMING_SNAKE_CASE = "." ) -> str:
for dir_path, dir_names, filenames in os.walk(_SCREAMING_SNAKE_CASE ):
snake_case_ = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_SCREAMING_SNAKE_CASE )[1] in (".py", ".ipynb"):
yield os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).lstrip("""./""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
return f"""{i * " "}*""" if i else "\n##"
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
snake_case_ = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_SCREAMING_SNAKE_CASE ) or old_parts[i] != new_part) and new_part:
print(f"""{md_prefix(_SCREAMING_SNAKE_CASE )} {new_part.replace("_" , " " ).title()}""" )
return new_path
def _a ( _SCREAMING_SNAKE_CASE = "." ) -> List[Any]:
snake_case_ = """"""
for filepath in sorted(good_file_paths(_SCREAMING_SNAKE_CASE ) ):
snake_case_ , snake_case_ = os.path.split(_SCREAMING_SNAKE_CASE )
if filepath != old_path:
snake_case_ = print_path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = (filepath.count(os.sep ) + 1) if filepath else 0
snake_case_ = f"""{filepath}/{filename}""".replace(""" """ , """%20""" )
snake_case_ = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(f"""{md_prefix(_SCREAMING_SNAKE_CASE )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md('.')
| 706 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class __A :
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase_ : list[str] ) ->List[Any]:
"""simple docstring"""
snake_case_ = []
self.adlist.append(
{"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} )
for keyword in keywords:
self.add_keyword(UpperCAmelCase_ )
self.set_fail_transitions()
def lowerCAmelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str ) ->int | None:
"""simple docstring"""
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def lowerCAmelCase ( self : int , UpperCAmelCase_ : str ) ->None:
"""simple docstring"""
snake_case_ = 0
for character in keyword:
snake_case_ = self.find_next_state(UpperCAmelCase_ , UpperCAmelCase_ )
if next_state is None:
self.adlist.append(
{
"""value""": character,
"""next_states""": [],
"""fail_state""": 0,
"""output""": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
snake_case_ = len(self.adlist ) - 1
else:
snake_case_ = next_state
self.adlist[current_state]["output"].append(UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->None:
"""simple docstring"""
snake_case_ = deque()
for node in self.adlist[0]["next_states"]:
q.append(UpperCAmelCase_ )
snake_case_ = 0
while q:
snake_case_ = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(UpperCAmelCase_ )
snake_case_ = self.adlist[r]["""fail_state"""]
while (
self.find_next_state(UpperCAmelCase_ , self.adlist[child]["""value"""] ) is None
and state != 0
):
snake_case_ = self.adlist[state]["""fail_state"""]
snake_case_ = self.find_next_state(
UpperCAmelCase_ , self.adlist[child]["""value"""] )
if self.adlist[child]["fail_state"] is None:
snake_case_ = 0
snake_case_ = (
self.adlist[child]["""output"""]
+ self.adlist[self.adlist[child]["""fail_state"""]]["""output"""]
)
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : str ) ->dict[str, list[int]]:
"""simple docstring"""
snake_case_ = {} # returns a dict with keywords and list of its occurrences
snake_case_ = 0
for i in range(len(UpperCAmelCase_ ) ):
while (
self.find_next_state(UpperCAmelCase_ , string[i] ) is None
and current_state != 0
):
snake_case_ = self.adlist[current_state]["""fail_state"""]
snake_case_ = self.find_next_state(UpperCAmelCase_ , string[i] )
if next_state is None:
snake_case_ = 0
else:
snake_case_ = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
snake_case_ = []
result[key].append(i - len(UpperCAmelCase_ ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class __A :
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
snake_case_ = len(UpperCAmelCase_ ) - 1
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[Any] ) ->List[Any]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
snake_case_ = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , UpperCAmelCase_ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(UpperCAmelCase_ ) , 5 ) == 1
return output_values
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Union[str, Any] ) ->str:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
snake_case_ = self.basis_function(UpperCAmelCase_ )
snake_case_ = 0.0
snake_case_ = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def lowerCAmelCase ( self : int , UpperCAmelCase_ : List[str] = 0.01 ) ->Optional[int]:
"""simple docstring"""
from matplotlib import pyplot as plt # type: ignore
snake_case_ = [] # x coordinates of points to plot
snake_case_ = [] # y coordinates of points to plot
snake_case_ = 0.0
while t <= 1:
snake_case_ = self.bezier_curve_function(UpperCAmelCase_ )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
snake_case_ = [i[0] for i in self.list_of_points]
snake_case_ = [i[1] for i in self.list_of_points]
plt.plot(
UpperCAmelCase_ , UpperCAmelCase_ , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
plt.scatter(UpperCAmelCase_ , UpperCAmelCase_ , color="""red""" , label="""Control Points""" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3 | 707 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Dict=[10, 20, 30, 40] , UpperCAmelCase_ : List[Any]=[2, 2, 3, 2] , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : int=["stage2", "stage3", "stage4"] , UpperCAmelCase_ : Optional[int]=[2, 3, 4] , UpperCAmelCase_ : List[str]=None , ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = num_channels
snake_case_ = num_stages
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = num_labels
snake_case_ = initializer_range
snake_case_ = out_features
snake_case_ = out_indices
snake_case_ = scope
def lowerCAmelCase ( self : List[str] ) ->str:
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = ConvNextVaModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] ) ->Any:
"""simple docstring"""
snake_case_ = ConvNextVaForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = ConvNextVaBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case_ = None
snake_case_ = ConvNextVaBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
def lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[Any] = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
__lowercase: Union[str, Any] = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
__lowercase: Union[str, Any] = False
__lowercase: Optional[Any] = False
__lowercase: Any = False
__lowercase: Union[str, Any] = False
__lowercase: Dict = False
def lowerCAmelCase ( self : Union[str, Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = ConvNextVaModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_with_labels()
snake_case_ = True
if model_class.__name__ in [
*get_values(UpperCAmelCase_ ),
*get_values(UpperCAmelCase_ ),
]:
continue
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.train()
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ )
snake_case_ = model(**UpperCAmelCase_ ).loss
loss.backward()
def lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_with_labels()
snake_case_ = False
snake_case_ = True
if (
model_class.__name__
in [*get_values(UpperCAmelCase_ ), *get_values(UpperCAmelCase_ )]
or not model_class.supports_gradient_checkpointing
):
continue
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.gradient_checkpointing_enable()
model.train()
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ )
snake_case_ = model(**UpperCAmelCase_ ).loss
loss.backward()
def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str ):
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
snake_case_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case_ = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase_ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@slow
def lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = ConvNextVaModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def _a ( ) -> str:
snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __A (unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
snake_case_ = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(UpperCAmelCase_ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = preprocessor(images=UpperCAmelCase_ , return_tensors="""pt""" ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase_ )
# verify the logits
snake_case_ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
snake_case_ = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 2 | 0 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = " " ) -> int:
snake_case_ = []
snake_case_ = 0
for index, char in enumerate(lowerCAmelCase__ ):
if char == separator:
split_words.append(string[last_index:index] )
snake_case_ = index + 1
elif index + 1 == len(lowerCAmelCase__ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 708 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = ['model.decoder.embed_positions.weights']
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
if "emb" in name:
snake_case_ = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
snake_case_ = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
snake_case_ = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
snake_case_ = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
snake_case_ = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
snake_case_ = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
snake_case_ = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
snake_case_ = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
snake_case_ = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
snake_case_ = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
snake_case_ = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple[Dict, Dict]:
snake_case_ = list(state_dict.keys() )
snake_case_ = {}
for key in keys:
snake_case_ = state_dict.pop(_SCREAMING_SNAKE_CASE )
snake_case_ = rename_keys(_SCREAMING_SNAKE_CASE )
if "in_proj_weight" in key:
# split fused qkv proj
snake_case_ = val[:hidden_size, :]
snake_case_ = val[hidden_size : 2 * hidden_size, :]
snake_case_ = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
snake_case_ = val
else:
snake_case_ = val
return state_dict, enc_dec_proj_state_dict
def _a ( _SCREAMING_SNAKE_CASE ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
snake_case_ = 1_024
snake_case_ = 24
snake_case_ = 16
elif checkpoint == "medium":
snake_case_ = 1_536
snake_case_ = 48
snake_case_ = 24
elif checkpoint == "large":
snake_case_ = 2_048
snake_case_ = 48
snake_case_ = 32
else:
raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
snake_case_ = MusicgenDecoderConfig(
hidden_size=_SCREAMING_SNAKE_CASE , ffn_dim=hidden_size * 4 , num_hidden_layers=_SCREAMING_SNAKE_CASE , num_attention_heads=_SCREAMING_SNAKE_CASE , )
return config
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="cpu" ) -> Tuple:
snake_case_ = MusicGen.get_pretrained(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
snake_case_ = decoder_config_from_checkpoint(_SCREAMING_SNAKE_CASE )
snake_case_ = fairseq_model.lm.state_dict()
snake_case_ , snake_case_ = rename_state_dict(
_SCREAMING_SNAKE_CASE , hidden_size=decoder_config.hidden_size )
snake_case_ = TaEncoderModel.from_pretrained("""t5-base""" )
snake_case_ = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
snake_case_ = MusicgenForCausalLM(_SCREAMING_SNAKE_CASE ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
snake_case_ , snake_case_ = decoder.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
snake_case_ = MusicgenForConditionalGeneration(text_encoder=_SCREAMING_SNAKE_CASE , audio_encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_SCREAMING_SNAKE_CASE )
# check we can do a forward pass
snake_case_ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
snake_case_ = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
snake_case_ = model(input_ids=_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
snake_case_ = AutoTokenizer.from_pretrained("""t5-base""" )
snake_case_ = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
snake_case_ = MusicgenProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
# set the appropriate bos/pad token ids
snake_case_ = 2_048
snake_case_ = 2_048
# set other default generation config params
snake_case_ = int(30 * audio_encoder.config.frame_rate )
snake_case_ = True
snake_case_ = 3.0
if pytorch_dump_folder is not None:
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if repo_id:
logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
processor.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 2 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
class __A (a__):
'''simple docstring'''
def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[str] ) ->None:
"""simple docstring"""
warnings.warn(
"""The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PerceiverImageProcessor instead.""" , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
| 709 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
if index == number_of_items:
return 0
snake_case_ = 0
snake_case_ = 0
snake_case_ = knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 )
if weights[index] <= max_weight:
snake_case_ = values[index] + knapsack(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_weight - weights[index] , index + 1 )
return max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__SCREAMING_SNAKE_CASE : Tuple = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 710 |
"""simple docstring"""
from math import factorial
def _a ( _SCREAMING_SNAKE_CASE = 20 ) -> int:
snake_case_ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
snake_case_ = n // 2
return int(factorial(_SCREAMING_SNAKE_CASE ) / (factorial(_SCREAMING_SNAKE_CASE ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
__SCREAMING_SNAKE_CASE : Optional[int] = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 2 | 0 |
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _a ( ) -> Union[str, Any]:
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(__lowerCAmelCase ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def _a ( ) -> List[str]:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def _a ( ) -> Optional[int]:
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(__lowerCAmelCase ):
http_head("""https://huggingface.co""" )
| 711 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def _a ( _SCREAMING_SNAKE_CASE = 8 ) -> str:
snake_case_ = ascii_letters + digits + punctuation
return "".join(secrets.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
# Password Generator = full boot with random_number, random_letters, and
# random_character FUNCTIONS
# Put your code here...
i -= len(_SCREAMING_SNAKE_CASE )
snake_case_ = i // 3
snake_case_ = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
snake_case_ = (
chars_incl
+ random(_SCREAMING_SNAKE_CASE , quotient + remainder )
+ random(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
+ random(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
)
snake_case_ = list(_SCREAMING_SNAKE_CASE )
shuffle(_SCREAMING_SNAKE_CASE )
return "".join(_SCREAMING_SNAKE_CASE )
# random is a generalised function for letters, characters and numbers
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
return "".join(secrets.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 8 ) -> bool:
if len(_SCREAMING_SNAKE_CASE ) < min_length:
# Your Password must be at least 8 characters long
return False
snake_case_ = any(char in ascii_uppercase for char in password )
snake_case_ = any(char in ascii_lowercase for char in password )
snake_case_ = any(char in digits for char in password )
snake_case_ = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def _a ( ) -> str:
snake_case_ = int(input("""Please indicate the max length of your password: """ ).strip() )
snake_case_ = input(
"""Please indicate the characters that must be in your password: """ ).strip()
print("""Password generated:""" , password_generator(_SCREAMING_SNAKE_CASE ) )
print(
"""Alternative Password generated:""" , alternative_password_generator(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , )
print("""[If you are thinking of using this passsword, You better save it.]""" )
if __name__ == "__main__":
main()
| 2 | 0 |
Subsets and Splits