import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_filenames(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
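

# Minimal usage sketch of the API exercised above (added illustration; "resource.lock"
# is a hypothetical path). Guarded so it only runs when this file is executed directly.
if __name__ == "__main__":
    demo_lock = FileLock("resource.lock")
    with demo_lock.acquire(timeout=1):
        pass  # exclusive access to a shared resource; Timeout is raised if the lock stays busy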
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
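

# Usage sketch (added illustration, not part of the original file): instantiate a
# default config and inspect the derived stage names.
if __name__ == "__main__":
    config = ConvNextV2Config()  # defaults: 4 stages, depths [3, 3, 9, 3], hidden sizes [96, 192, 384, 768]
    print(config.stage_names)    # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']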
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def to_dict(self) -> Dict[str, Any]:
        # serialize without the (large, recomputable) mel filter banks
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose an index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length + hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
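

# Usage sketch (added illustration; in practice the configuration would come from a
# real checkpoint such as "laion/clap-htsat-unfused" via from_pretrained):
if __name__ == "__main__":
    extractor = ClapFeatureExtractor()
    dummy_audio = np.zeros(3 * 48_000, dtype=np.float64)  # 3 s of silence at 48 kHz
    features = extractor(dummy_audio, sampling_rate=48_000, return_tensors="np")
    print(features["input_features"].shape, features["is_longer"])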
# Korean doc-build install snippet; in English: "How to install Transformers /
# To install from source instead of the latest release, comment out the command
# above and uncomment the one below."
INSTALL_CONTENT = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """
    >>> min_distance_up_bottom("intention", "execution")
    5
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if the first word is exhausted - insert the rest of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if the second word is exhausted - delete the rest of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
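

# For reference, an equivalent bottom-up formulation of the same recurrence (added
# illustration, not part of the original module): cell [i][j] of the table holds the
# edit distance between word1[i:] and word2[j:].
def min_distance_bottom_up(word1: str, word2: str) -> int:
    m, n = len(word1), len(word2)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m, -1, -1):
        for j in range(n, -1, -1):
            if i == m:
                dp[i][j] = n - j  # insert the rest of word2
            elif j == n:
                dp[i][j] = m - i  # delete the rest of word1
            else:
                diff = int(word1[i] != word2[j])
                dp[i][j] = min(1 + dp[i + 1][j], 1 + dp[i][j + 1], diff + dp[i + 1][j + 1])
    return dp[0][0]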
if __name__ == "__main__":
import doctest
doctest.testmod()
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
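

# Minimal end-to-end sketch of the pipeline under test (added illustration mirroring
# the slow test above; the ids are the public Kandinsky 2.1 checkpoints, and
# `init_image`/`mask` stand in for a PIL image and a float mask of matching size):
#
#     prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#     pipe = KandinskyInpaintPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-inpaint")
#     image_emb, negative_emb = prior("a hat").to_tuple()
#     result = pipe("a hat", image=init_image, mask_image=mask,
#                   image_embeds=image_emb, negative_image_embeds=negative_emb)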
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    # Are we running in a Kaggle kernel or in Google Colab?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."
        )
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
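

# Usage sketch (added illustration): from a notebook, wrap your training entry point
# and hand it to the launcher. `training_function` and its args are placeholders.
#
#     def training_function(lr):
#         ...  # build Accelerator, model, dataloaders here
#
#     notebook_launcher(training_function, args=(1e-4,), num_processes=2)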
def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # brute force over all pairs; used for the small base case
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # within the strip, each point only needs to be compared to its 6 neighbours
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
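

# Usage sketch (added illustration; "google/owlvit-base-patch32" is the public OWL-ViT
# base checkpoint and `image` stands in for a PIL image):
#
#     from transformers import OwlViTProcessor
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")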
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
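

# The per-pixel remapping above is classic histogram equalization: the lookup value for
# gray level k is round((L - 1) * CDF(k)). A vectorized NumPy equivalent of the loops,
# shown here as an added illustration:
#
#     hist, _ = np.histogram(img.ravel(), 256, [0, 256])
#     cdf = hist.cumsum() / hist.sum()
#     lut = np.round((256 - 1) * cdf).astype(np.uint8)
#     img_eq = lut[img]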
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict


@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_audio():
    # note: "spectogram" is spelled this way in the actual dataset repo id
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
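

# Inference sketch matching the integration test above (added illustration; the
# checkpoint is the public AudioSet-finetuned AST model and `waveform` stands in for
# a 1-D numpy array sampled at 16 kHz):
#
#     extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
#     model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
#     inputs = extractor(waveform, sampling_rate=16_000, return_tensors="pt")
#     logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])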
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
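

# Usage sketch (added illustration): determine_framework resolves which framework an
# ONNX export should run from, preferring an explicit argument, then the checkpoint
# contents, then the installed environment.
#
#     from transformers.onnx import FeaturesManager
#     framework = FeaturesManager.determine_framework("bert-base-uncased")  # -> "pt" when torch is installed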
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase__ = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def _UpperCamelCase ( self : int ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 1 | 0 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self) -> None:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 3_8_0_1_5, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 2_5_5_0_6, "token_str": " accuser"},
] , )
UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-0_5,
"token": 3_8_0_1_5,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-0_5,
"token": 2_5_5_0_6,
"token_str": " accuser",
},
] , )
UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 1_3_6_0_6, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3_4_9_9, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2_9_4_1, "token_str": " Te"},
] , )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 3_5_6_7_6, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-0_5, "token": 1_6_4_1_6, "token_str": "ELS"},
] , )
UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-0_5,
"token": 3_5_6_7_6,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 1_6_4_1_6, "token_str": "ELS"},
] , )
UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3_4_9_9, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-0_5, "token": 2_9_4_1, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 1_3_6_0_6, "token_str": " Clara"},
] , )
UpperCAmelCase = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
[
{
"score": 2.2e-0_5,
"token": 3_5_6_7_6,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-0_5, "token": 1_6_4_1_6, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-0_5,
"token": 3_5_6_7_6,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-0_5, "token": 1_6_4_1_6, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")
# convert model to fp16
pipe.model.half()
UpperCAmelCase = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(lowercase__ ) , [
{"sequence": "My name is John", "score": 0.008, "token": 6_1_0, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1_5_7_3, "token_str": " Chris"},
] , )
UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(lowercase__ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2_2_0_1,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 1_2_7_9_0,
"token_str": " Lyon",
},
] , )
UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase__ ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3_4_9_9, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 1_3_6_0_6, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2_9_4_1, "token_str": " Te"},
] , )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token}" , )
self.assertEqual(
lowercase__ , [
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
] , )
UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}"] )
self.assertEqual(
lowercase__ , [
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
] , )
UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
lowercase__ , [
[
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
],
[
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
],
] , )
with self.assertRaises(lowercase__ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(lowercase__ ):
fill_masker("This is" )
self.run_test_top_k(lowercase__ , lowercase__ )
self.run_test_targets(lowercase__ , lowercase__ )
self.run_test_top_k_targets(lowercase__ , lowercase__ )
self.fill_mask_with_duplicate_targets_and_top_k(lowercase__ , lowercase__ )
self.fill_mask_with_multiple_masks(lowercase__ , lowercase__ )
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
lowercase__ , [
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
] , )
UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase__ )
UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase__ ) )
# Call argument
UpperCAmelCase = FillMaskPipeline(model=lowercase__ , tokenizer=lowercase__ )
UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=lowercase__ )
self.assertEqual(
lowercase__ , [
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
] , )
UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase__ )
UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase__ ) )
# Score equivalence
UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=lowercase__ )
UpperCAmelCase = [top_mask["""token_str"""] for top_mask in outputs]
UpperCAmelCase = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase__ ) == set(lowercase__ ):
UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=lowercase__ )
UpperCAmelCase = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowercase__ ) , nested_simplify(lowercase__ ) )
# Raises with invalid
with self.assertRaises(lowercase__ ):
UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowercase__ ):
UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""] )
with self.assertRaises(lowercase__ ):
UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets="" )
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
lowercase__ , [
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
] , )
UpperCAmelCase = FillMaskPipeline(model=lowercase__ , tokenizer=lowercase__ )
UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
lowercase__ , [
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
] , )
self.assertEqual(nested_simplify(lowercase__ ) , nested_simplify(lowercase__ ) )
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
# top_k=2, ntargets=3
UpperCAmelCase = sorted(vocab.keys() )[:3]
UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=lowercase__ )
# If we use the most probably targets, and filter differently, we should still
# have the same results
UpperCAmelCase = [el["""token_str"""] for el in sorted(lowercase__ , key=lambda lowerCAmelCase__ : x["score"] , reverse=lowercase__ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase__ ).issubset(lowercase__ ):
UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=lowercase__ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowercase__ ) , nested_simplify(lowercase__ ) )
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
# String duplicates + id duplicates
UpperCAmelCase = sorted(vocab.keys() )[:3]
UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
UpperCAmelCase = fill_masker(f"My name is {tokenizer.mask_token}" , targets=lowercase__ , top_k=1_0 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(lowercase__ ) , 3 )
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
lowercase__ , [
[
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
],
[
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
],
[
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
{"sequence": ANY(lowercase__ ), "score": ANY(lowercase__ ), "token": ANY(lowercase__ ), "token_str": ANY(lowercase__ )},
],
] , )
| 703 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
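
# Usage sketch (hypothetical call; the defaults above mirror the
# unc-nlp/lxmert-base-uncased checkpoint configuration):
#
#   config = LxmertConfig()
#   config.num_hidden_layers  # {"vision": 5, "cross_encoder": 5, "language": 9}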
| 1 | 0 |
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer is the layer connecting
        # the input nodes with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        # Gradients of the squared error w.r.t. each weight matrix, computed by
        # the chain rule, starting from the output layer and moving backwards.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    # Logistic activation, applied element-wise.
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    # Derivative of the sigmoid, expressed in terms of its own output.
    return (value) * (1 - (value))
def example() -> int:
    # Input values (all 3-bit binary vectors).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
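
# Note: the training set above is the 3-bit odd-parity function (the target is 1
# exactly when an odd number of inputs is 1), so example() asks the network to
# classify the all-ones input, whose expected class is 1.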
if __name__ == "__main__":
example()
| 704 |
def sum_digits(num: int) -> int:
    # Sum of the decimal digits of num.
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    # Numerator recurrence for the continued-fraction convergents of e:
    # e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...], so the partial quotient at
    # position i is 2 * i / 3 when i is a multiple of 3, and 1 otherwise.
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)
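
# Sanity check from the Project Euler 65 statement: the 10th convergent of e is
# 1457/536, so the digit sum of its numerator is 1 + 4 + 5 + 7 = 17.
assert solution(10) == 17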
if __name__ == "__main__":
print(f"{solution() = }")
| 1 | 0 |
import collections
import inspect
import unittest
from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class Swinv2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        layer_norm_eps=1e-5,
        initializer_range=0.02,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
return config, pixel_values, labels
    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
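
    # Shape arithmetic with the tester defaults above (image_size=32, patch_size=2,
    # embed_dim=16, depths=[1, 2, 1]): the patch grid has 16 * 16 = 256 tokens, and
    # each of the two downsampling steps divides the token count by 4 while doubling
    # the width, so the expected last hidden state is
    # (batch_size, 256 // 4**2, 16 * 2**2) = (batch_size, 16, 64).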
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
    def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds" )
    def test_inputs_embeds(self):
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = True
for model_class in self.all_model_classes:
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = True
UpperCAmelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
UpperCAmelCase = outputs.attentions
UpperCAmelCase = len(self.model_tester.depths )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase = True
UpperCAmelCase = config.window_size**2
UpperCAmelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
UpperCAmelCase = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
UpperCAmelCase = len(lowerCAmelCase__ )
# Check attention is always last and order is fine
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
if hasattr(self.model_tester , "num_hidden_states_types" ):
UpperCAmelCase = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
UpperCAmelCase = 2
self.assertEqual(out_len + added_hidden_states , len(lowerCAmelCase__ ) )
UpperCAmelCase = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
UpperCAmelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
UpperCAmelCase = outputs.hidden_states
UpperCAmelCase = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# Swinv2 has a different seq_length
UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCAmelCase = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = reshaped_hidden_states[0].shape
UpperCAmelCase = (
reshaped_hidden_states[0].view(lowerCAmelCase__ , lowerCAmelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output(self):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
    def test_hidden_states_output_with_padding(self):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = 3
UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = _config_zero_init(lowerCAmelCase__ )
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(config=lowerCAmelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return (
AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 705 |
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
| 1 | 0 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 706 |
import numpy
# List of input, output pairs
train_data = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _lowerCAmelCase( __A , __A="train" ):
return calculate_hypothesis_value(__A , __A ) - output(
__A , __A )
def _hypothesis_value(data_input_tuple):
    # Linear hypothesis: h(x) = theta_0 + theta_1*x1 + theta_2*x2 + theta_3*x3.
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    # Actual output for the chosen example, from the train or test set.
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    # Predicted output for the chosen example.
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    # Sum of (error * feature) over the first `end` training examples;
    # index == -1 selects the bias term (no feature factor).
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
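
# How the helpers above fit together (sketch): run_gradient_descent (below) updates
# parameter_vector[i] with get_cost_derivative(i - 1), so i == 0 maps to the bias
# theta_0 (index -1, which skips the feature factor in summation_of_cost_derivative)
# and i >= 1 maps to the weight on feature x_i. This is plain batch gradient descent
# on the mean squared error over the training set.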
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 1 | 0 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
transformers_path = os.path.join(git_repo_path, "src", "transformers")

DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")")
        self.assertIsNone(no_backend)

        simple_backend = find_backend(" if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "\'torch\'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "\'torch\'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n")

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n"
        dummy_class = create_dummy_object("FakeClass", "\'torch\'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 707 |
def binary_exponentiation(a, n, mod):
    # Computes (a ** n) % mod using O(log n) multiplications.
    if n == 0:
        return 1

    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod

    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
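
# Worked example (sketch): since p is prime, Fermat's little theorem makes
# binary_exponentiation(b, p - 2, p) the modular inverse of b, so "dividing"
# 420 by 10 modulo 701 can be done entirely with integers:
assert (420 * binary_exponentiation(10, 701 - 2, 701)) % 701 == 42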
| 1 | 0 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
def _UpperCamelCase ( self : List[str] ) -> Optional[int]:
UpperCAmelCase , UpperCAmelCase = plt.subplots()
UpperCAmelCase = "Time usage" if self.args.is_time else "Memory usage"
UpperCAmelCase = title_str + " for training" if self.args.is_train else title_str + " for inference"
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("log" )
ax.set_yscale("log" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
UpperCAmelCase = sorted(set(self.result_dict[model_name]["bsz"] ) )
UpperCAmelCase = sorted(set(self.result_dict[model_name]["seq_len"] ) )
UpperCAmelCase = self.result_dict[model_name]["result"]
((UpperCAmelCase) , (UpperCAmelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
UpperCAmelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
UpperCAmelCase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_lowerCAmelCase , )
else:
UpperCAmelCase = np.asarray(
                    [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
((UpperCAmelCase) , (UpperCAmelCase)) = (
("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
)
UpperCAmelCase = np.asarray(_lowerCAmelCase , _lowerCAmelCase )[: len(_lowerCAmelCase )]
plt.scatter(
_lowerCAmelCase , _lowerCAmelCase , label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}" )
plt.plot(_lowerCAmelCase , _lowerCAmelCase , "--" )
title_str += f" {label_model_name} vs."
UpperCAmelCase = title_str[:-4]
UpperCAmelCase = "Time in s" if self.args.is_time else "Memory in MB"
# plot
plt.title(_lowerCAmelCase )
plt.xlabel(_lowerCAmelCase )
plt.ylabel(_lowerCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def _lowerCAmelCase( ):
UpperCAmelCase = HfArgumentParser(__A )
UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
UpperCAmelCase = Plot(args=__A )
plot.plot()
if __name__ == "__main__":
main()
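# Hedged usage note (an addition): a typical invocation of this plotting script
# might look like the command below; the script and file names are illustrative
# assumptions, only the flags come from the dataclass fields defined above.
#   python plot_csv_file.py --csv_file benchmark_results.csv --figure_png_file plot.png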
| 708 |
lowerCAmelCase__ = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
lowerCAmelCase__ = {value: key for key, value in encode_dict.items()}
def _lowerCAmelCase( __A ):
UpperCAmelCase = ""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("encode() accepts only letters of the alphabet and spaces" )
return encoded
def _lowerCAmelCase( __A ):
if set(__A ) - {"A", "B", " "} != set():
raise Exception("decode() accepts only 'A', 'B' and spaces" )
UpperCAmelCase = ""
for word in coded.split():
while len(__A ) != 0:
decoded += decode_dict[word[:5]]
UpperCAmelCase = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
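# Hedged standalone sketch (an addition; the module-level names above are mangled):
# the cipher is a fixed five-symbol substitution, so decoding is just the inverted
# dictionary applied to successive 5-character chunks.
_enc = {"a": "AAAAA", "b": "AAAAB"}
_dec = {v: k for k, v in _enc.items()}
_coded = "".join(_enc[ch] for ch in "ab")
assert "".join(_dec[_coded[i : i + 5]] for i in range(0, len(_coded), 5)) == "ab"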
| 1 | 0 |
class __magic_name__ : # Public class to implement a graph
def __init__( self : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] ) -> None:
UpperCAmelCase = row
UpperCAmelCase = col
UpperCAmelCase = graph
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict ) -> bool:
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def _UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] ) -> None:
# Checking all 8 elements surrounding nth element
UpperCAmelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
UpperCAmelCase = [-1, 0, 1, -1, 1, -1, 0, 1]
UpperCAmelCase = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , _a ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , _a )
def _UpperCamelCase ( self : Any ) -> int: # And finally, count all islands.
UpperCAmelCase = [[False for j in range(self.COL )] for i in range(self.ROW )]
UpperCAmelCase = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(_a , _a , _a )
count += 1
return count
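# Hedged standalone sketch (an addition; the methods above share a mangled name):
# the same 8-directional flood fill with readable names.
def _count_islands(grid):
    rows, cols = len(grid), len(grid[0])
    seen = [[False] * cols for _ in range(rows)]

    def dfs(i, j):
        if 0 <= i < rows and 0 <= j < cols and grid[i][j] and not seen[i][j]:
            seen[i][j] = True
            for di in (-1, 0, 1):  # visit all 8 neighbours
                for dj in (-1, 0, 1):
                    if di or dj:
                        dfs(i + di, j + dj)

    count = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] and not seen[i][j]:
                dfs(i, j)
                count += 1
    return count

assert _count_islands([[1, 0, 0], [0, 0, 0], [0, 0, 1]]) == 2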
| 709 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase__ = {"UserAgent": UserAgent().random}
def _lowerCAmelCase( __A ):
UpperCAmelCase = script.contents[0]
UpperCAmelCase = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __magic_name__ :
def __init__( self : Optional[Any] , lowerCAmelCase__ : Optional[int] ) -> Any:
UpperCAmelCase = f"https://www.instagram.com/{username}/"
UpperCAmelCase = self.get_json()
def _UpperCamelCase ( self : List[str] ) -> dict:
UpperCAmelCase = requests.get(self.url , headers=lowerCAmelCase__ ).text
UpperCAmelCase = BeautifulSoup(lowerCAmelCase__ , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Tuple ) -> str:
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self : Optional[int] ) -> str:
return f"{self.fullname} ({self.username}) is {self.biography}"
@property
def _UpperCamelCase ( self : Any ) -> str:
return self.user_data["username"]
@property
def _UpperCamelCase ( self : List[Any] ) -> str:
return self.user_data["full_name"]
@property
def _UpperCamelCase ( self : List[str] ) -> str:
return self.user_data["biography"]
@property
def _UpperCamelCase ( self : Optional[int] ) -> str:
return self.user_data["business_email"]
@property
def _UpperCamelCase ( self : str ) -> str:
return self.user_data["external_url"]
@property
def _UpperCamelCase ( self : int ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def _UpperCamelCase ( self : List[Any] ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def _UpperCamelCase ( self : List[str] ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _UpperCamelCase ( self : Tuple ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def _UpperCamelCase ( self : Optional[int] ) -> bool:
return self.user_data["is_verified"]
@property
def _UpperCamelCase ( self : Optional[Any] ) -> bool:
return self.user_data["is_private"]
def _lowerCAmelCase( __A = "github" ):
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
UpperCAmelCase = InstagramUser(__A )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __A )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = InstagramUser("github")
print(instagram_user)
print(f"{instagram_user.number_of_posts = }")
print(f"{instagram_user.number_of_followers = }")
print(f"{instagram_user.number_of_followings = }")
print(f"{instagram_user.email = }")
print(f"{instagram_user.website = }")
print(f"{instagram_user.profile_picture_url = }")
print(f"{instagram_user.is_verified = }")
print(f"{instagram_user.is_private = }")
| 1 | 0 |
def _lowerCAmelCase( __A ):
if any(not isinstance(__UpperCamelCase , __UpperCamelCase ) or x < 0 for x in sequence ):
raise TypeError("Sequence must be list of non-negative integers" )
for _ in range(len(__UpperCamelCase ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(__UpperCamelCase , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
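# Hedged worked trace (an addition): on [2, 1] the pair has rod_upper > rod_lower,
# so one "bead" (diff = 1) falls and the list becomes [1, 2]; the outer loop always
# runs len(sequence) passes, so the sort costs O(n^2) comparisons, like bubble sort.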
| 710 |
import unittest
import numpy as np
def _lowerCAmelCase( __A , __A , __A , __A = None , ):
UpperCAmelCase = np.shape(__A )
UpperCAmelCase = np.shape(__A )
UpperCAmelCase = np.shape(__A )
if shape_a[0] != shape_b[0]:
UpperCAmelCase = (
"Expected the same number of rows for A and B. "
F"Instead found A of size {shape_a} and B of size {shape_b}"
)
raise ValueError(__A )
if shape_b[1] != shape_c[1]:
UpperCAmelCase = (
"Expected the same number of columns for B and C. "
F"Instead found B of size {shape_b} and C of size {shape_c}"
)
raise ValueError(__A )
UpperCAmelCase = pseudo_inv
if a_inv is None:
try:
UpperCAmelCase = np.linalg.inv(__A )
except np.linalg.LinAlgError:
raise ValueError(
"Input matrix A is not invertible. Cannot compute Schur complement." )
return mat_c - mat_b.T @ a_inv @ mat_b
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : List[str] ) -> None:
UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase = np.array([[2, 1], [6, 3]] )
UpperCAmelCase = schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = np.block([[a, b], [b.T, c]] )
UpperCAmelCase = np.linalg.det(lowerCAmelCase__ )
UpperCAmelCase = np.linalg.det(lowerCAmelCase__ )
UpperCAmelCase = np.linalg.det(lowerCAmelCase__ )
self.assertAlmostEqual(lowerCAmelCase__ , det_a * det_s )
def _UpperCamelCase ( self : str ) -> None:
UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowerCAmelCase__ ):
schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : Dict ) -> None:
UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowerCAmelCase__ ):
schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 1 | 0 |
def _lowerCAmelCase( __A , __A , __A , __A ):
if height >= 1:
move_tower(height - 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
move_disk(lowerCamelCase_ , lowerCamelCase_ )
move_tower(height - 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def _lowerCAmelCase( __A , __A ):
print("moving disk from" , lowerCamelCase_ , "to" , lowerCamelCase_ )
def _lowerCAmelCase( ):
UpperCAmelCase = int(input("Height of hanoi: " ).strip() )
move_tower(lowerCamelCase_ , "A" , "B" , "C" )
if __name__ == "__main__":
main()
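# Hedged note (an addition): the recursion above issues 2**height - 1 move_disk
# calls in total, which is the provable minimum for the Tower of Hanoi.
def _hanoi_moves(height: int) -> int:
    return 2 ** height - 1

assert _hanoi_moves(3) == 7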
| 711 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def _lowerCAmelCase( __A ):
UpperCAmelCase = fname.split(os.path.sep )[-1]
return re.search(r"^(.*)_\d+\.jpg$" , __A ).groups()[0]
class __magic_name__ ( _snake_case ):
def __init__( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : int=None ) -> Optional[Any]:
UpperCAmelCase = file_names
UpperCAmelCase = image_transform
UpperCAmelCase = label_to_id
def __len__( self : Tuple ) -> List[str]:
return len(self.file_names )
def __getitem__( self : Optional[int] , lowerCAmelCase__ : Tuple ) -> Dict:
UpperCAmelCase = self.file_names[idx]
UpperCAmelCase = PIL.Image.open(lowerCAmelCase__ )
UpperCAmelCase = raw_image.convert("RGB" )
if self.image_transform is not None:
UpperCAmelCase = self.image_transform(lowerCAmelCase__ )
UpperCAmelCase = extract_label(lowerCAmelCase__ )
if self.label_to_id is not None:
UpperCAmelCase = self.label_to_id[label]
return {"image": image, "label": label}
def _lowerCAmelCase( __A , __A ):
# Initialize accelerator
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config["lr"]
UpperCAmelCase = int(config["num_epochs"] )
UpperCAmelCase = int(config["seed"] )
UpperCAmelCase = int(config["batch_size"] )
UpperCAmelCase = config["image_size"]
if not isinstance(__A , (list, tuple) ):
UpperCAmelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , "isdigit" ):
if args.checkpointing_steps == "epoch":
UpperCAmelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
UpperCAmelCase = int(args.checkpointing_steps )
else:
raise ValueError(
F"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." )
else:
UpperCAmelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
UpperCAmelCase = os.path.split(__A )[-1].split("." )[0]
accelerator.init_trackers(__A , __A )
# Grab all the image filenames
UpperCAmelCase = [os.path.join(args.data_dir , __A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
UpperCAmelCase = [extract_label(__A ) for fname in file_names]
UpperCAmelCase = list(set(__A ) )
id_to_label.sort()
UpperCAmelCase = {lbl: i for i, lbl in enumerate(__A )}
# Set the seed before splitting the data.
np.random.seed(__A )
torch.manual_seed(__A )
torch.cuda.manual_seed_all(__A )
# Split our filenames between train and validation
UpperCAmelCase = np.random.permutation(len(__A ) )
UpperCAmelCase = int(0.8 * len(__A ) )
UpperCAmelCase = random_perm[:cut]
UpperCAmelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
UpperCAmelCase = Compose([RandomResizedCrop(__A , scale=(0.5, 1.0) ), ToTensor()] )
UpperCAmelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=__A , label_to_id=__A )
# For evaluation, we use a deterministic Resize
UpperCAmelCase = Compose([Resize(__A ), ToTensor()] )
UpperCAmelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=__A , label_to_id=__A )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = create_model("resnet50d" , pretrained=__A , num_classes=len(__A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
UpperCAmelCase = False
for param in model.get_classifier().parameters():
UpperCAmelCase = True
# We normalize the batches of images to be a bit faster.
UpperCAmelCase = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
UpperCAmelCase = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
UpperCAmelCase = OneCycleLR(optimizer=__A , max_lr=__A , epochs=__A , steps_per_epoch=len(__A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
__A , __A , __A , __A , __A )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase = 0
# We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"Resumed from checkpoint: {args.resume_from_checkpoint}" )
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
UpperCAmelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
UpperCAmelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
UpperCAmelCase = os.path.splitext(__A )[0]
if "epoch" in training_difference:
UpperCAmelCase = int(training_difference.replace("epoch_" , "" ) ) + 1
UpperCAmelCase = None
else:
UpperCAmelCase = int(training_difference.replace("step_" , "" ) )
UpperCAmelCase = resume_step // len(__A )
resume_step -= starting_epoch * len(__A )
# Now we train the model
for epoch in range(__A , __A ):
model.train()
if args.with_tracking:
UpperCAmelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
UpperCAmelCase = accelerator.skip_first_batches(__A , __A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
UpperCAmelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch["image"] - mean) / std
UpperCAmelCase = model(__A )
UpperCAmelCase = torch.nn.functional.cross_entropy(__A , batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__A , __A ):
UpperCAmelCase = F"step_{overall_step}"
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
model.eval()
UpperCAmelCase = 0
UpperCAmelCase = 0
for step, batch in enumerate(__A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch["image"] - mean) / std
with torch.no_grad():
UpperCAmelCase = model(__A )
UpperCAmelCase = outputs.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["label"]) )
UpperCAmelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
UpperCAmelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}: {100 * eval_metric:.2f}" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(__A ),
"epoch": epoch,
} , step=__A , )
if checkpointing_steps == "epoch":
UpperCAmelCase = F"epoch_{epoch}"
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
if args.with_tracking:
accelerator.end_training()
def _lowerCAmelCase( ):
UpperCAmelCase = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir" , required=__A , help="The data folder on disk." )
parser.add_argument("--fp16" , action="store_true" , help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision" , type=__A , default=__A , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps" , type=__A , default=__A , help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." , )
parser.add_argument(
"--output_dir" , type=__A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=__A , default=__A , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=__A , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(__A , __A )
if __name__ == "__main__":
main()
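# Hedged usage note (an addition): a typical launch of this training script might
# be the command below; the script and folder names are illustrative assumptions,
# only the flags come from the argparse definitions above.
#   accelerate launch cv_example.py --data_dir ./images --with_tracking \
#       --checkpointing_steps epoch --output_dir ./checkpoints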
| 1 | 0 |
from __future__ import annotations
class __magic_name__ :
def __init__( self : Union[str, Any] , lowerCAmelCase__ : List[Any] ) -> None:
UpperCAmelCase = data
UpperCAmelCase = None
UpperCAmelCase = None
def _lowerCAmelCase( __A ): # In Order traversal of the tree
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def _lowerCAmelCase( __A ):
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def _lowerCAmelCase( __A ):
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def _lowerCAmelCase( ): # Main function for testing.
UpperCAmelCase = Node(1 )
UpperCAmelCase = Node(2 )
UpperCAmelCase = Node(3 )
UpperCAmelCase = Node(4 )
UpperCAmelCase = Node(5 )
UpperCAmelCase = Node(6 )
UpperCAmelCase = Node(7 )
UpperCAmelCase = Node(8 )
UpperCAmelCase = Node(9 )
print(is_full_binary_tree(__snake_case ) )
print(depth_of_tree(__snake_case ) )
print("Tree is: " )
display(__snake_case )
if __name__ == "__main__":
main()
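# Hedged standalone sketch (an addition; the functions above share a mangled name):
# "full" means every node has either zero or two children.
class _N:
    def __init__(self, data):
        self.data, self.left, self.right = data, None, None

def _is_full(node):
    if node is None:
        return True
    if (node.left is None) != (node.right is None):  # exactly one child -> not full
        return False
    return _is_full(node.left) and _is_full(node.right)

_root = _N(1)
_root.left, _root.right = _N(2), _N(3)
assert _is_full(_root)
_root.left.left = _N(4)  # node 2 now has only one child
assert not _is_full(_root)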
| 712 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
lowerCAmelCase__ = ""
lowerCAmelCase__ = ""
lowerCAmelCase__ = ""
lowerCAmelCase__ = 1 # (0 is vertical, 1 is horizontal)
def _lowerCAmelCase( ):
UpperCAmelCase , UpperCAmelCase = get_dataset(__A , __A )
print("Processing..." )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = update_image_and_anno(__A , __A , __A )
for index, image in enumerate(__A ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
UpperCAmelCase = random_chars(32 )
UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0]
UpperCAmelCase = F"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(F"/{file_root}.jpg" , __A , [cv2.IMWRITE_JPEG_QUALITY, 85] )
print(F"Success {index+1}/{len(__A )} with {file_name}" )
UpperCAmelCase = []
for anno in new_annos[index]:
UpperCAmelCase = F"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
annos_list.append(__A )
with open(F"/{file_root}.txt" , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def _lowerCAmelCase( __A , __A ):
UpperCAmelCase = []
UpperCAmelCase = []
for label_file in glob.glob(os.path.join(__A , "*.txt" ) ):
UpperCAmelCase = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(__A ) as in_file:
UpperCAmelCase = in_file.readlines()
UpperCAmelCase = os.path.join(__A , F"{label_name}.jpg" )
UpperCAmelCase = []
for obj_list in obj_lists:
UpperCAmelCase = obj_list.rstrip("\n" ).split(" " )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__A )
labels.append(__A )
return img_paths, labels
def _lowerCAmelCase( __A , __A , __A = 1 ):
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = []
for idx in range(len(__A ) ):
UpperCAmelCase = []
UpperCAmelCase = img_list[idx]
path_list.append(__A )
UpperCAmelCase = anno_list[idx]
        UpperCAmelCase = cv2.imread(__A )
if flip_type == 1:
            UpperCAmelCase = cv2.flip(__A , __A )
for bbox in img_annos:
UpperCAmelCase = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
            UpperCAmelCase = cv2.flip(__A , __A )
for bbox in img_annos:
UpperCAmelCase = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__A )
new_imgs_list.append(__A )
return new_imgs_list, new_annos_lists, path_list
def _lowerCAmelCase( __A = 32 ):
assert number_char > 1, "The number of character should greater than 1"
UpperCAmelCase = ascii_lowercase + digits
return "".join(random.choice(__A ) for _ in range(__A ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 1 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __magic_name__ ( __lowercase ):
UpperCAmelCase = 'gptj'
UpperCAmelCase = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Optional[Any] , lowerCAmelCase__ : List[str]=5_0_4_0_0 , lowerCAmelCase__ : Any=2_0_4_8 , lowerCAmelCase__ : Dict=4_0_9_6 , lowerCAmelCase__ : Union[str, Any]=2_8 , lowerCAmelCase__ : Tuple=1_6 , lowerCAmelCase__ : Dict=6_4 , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : Any="gelu_new" , lowerCAmelCase__ : Union[str, Any]=0.0 , lowerCAmelCase__ : Dict=0.0 , lowerCAmelCase__ : int=0.0 , lowerCAmelCase__ : Tuple=1e-5 , lowerCAmelCase__ : str=0.02 , lowerCAmelCase__ : int=True , lowerCAmelCase__ : str=5_0_2_5_6 , lowerCAmelCase__ : Union[str, Any]=5_0_2_5_6 , lowerCAmelCase__ : int=False , **lowerCAmelCase__ : Tuple , ) -> Union[str, Any]:
UpperCAmelCase = vocab_size
UpperCAmelCase = n_positions
UpperCAmelCase = n_embd
UpperCAmelCase = n_layer
UpperCAmelCase = n_head
UpperCAmelCase = n_inner
UpperCAmelCase = rotary_dim
UpperCAmelCase = activation_function
UpperCAmelCase = resid_pdrop
UpperCAmelCase = embd_pdrop
UpperCAmelCase = attn_pdrop
UpperCAmelCase = layer_norm_epsilon
UpperCAmelCase = initializer_range
UpperCAmelCase = use_cache
UpperCAmelCase = bos_token_id
UpperCAmelCase = eos_token_id
super().__init__(
bos_token_id=__A , eos_token_id=__A , tie_word_embeddings=__A , **__A )
class __magic_name__ ( __lowercase ):
def __init__( self : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple = "default" , lowerCAmelCase__ : Any = None , lowerCAmelCase__ : Any = False , ) -> Dict:
super().__init__(__A , task=__A , patching_specs=__A , use_past=__A )
if not getattr(self._config , "pad_token_id" , __A ):
# TODO: how to do that better?
UpperCAmelCase = 0
@property
def _UpperCamelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(__A , direction="inputs" )
UpperCAmelCase = {0: "batch", 1: "past_sequence + sequence"}
else:
UpperCAmelCase = {0: "batch", 1: "sequence"}
return common_inputs
@property
def _UpperCamelCase ( self : List[Any] ) -> int:
return self._config.n_layer
@property
def _UpperCamelCase ( self : Dict ) -> int:
return self._config.n_head
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict = -1 , lowerCAmelCase__ : Optional[int] = -1 , lowerCAmelCase__ : List[str] = False , lowerCAmelCase__ : Tuple = None , ) -> Mapping[str, Any]:
UpperCAmelCase = super(__A , self ).generate_dummy_inputs(
__A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A )
# We need to order the input in the way they appears in the forward()
UpperCAmelCase = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCAmelCase , UpperCAmelCase = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
UpperCAmelCase = seqlen + 2
UpperCAmelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
UpperCAmelCase = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
]
UpperCAmelCase = common_inputs["attention_mask"]
if self.use_past:
UpperCAmelCase = ordered_inputs["attention_mask"].dtype
UpperCAmelCase = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__A , __A , dtype=__A )] , dim=1 )
return ordered_inputs
@property
def _UpperCamelCase ( self : List[Any] ) -> int:
return 1_3
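# Hedged shape note (an addition): with use_past enabled, each of the n_layer cache
# entries built above is a (key, value) pair of zero tensors shaped
# (batch, num_attention_heads, seqlen + 2, hidden_size // num_attention_heads),
# i.e. a head dimension of 4096 // 16 = 256 under the default config values.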
| 713 |
def _lowerCAmelCase( __A ):
if not isinstance(__A , __A ):
raise TypeError("only integers accepted as input" )
else:
UpperCAmelCase = str(abs(__A ) )
UpperCAmelCase = [list(__A ) for char in range(len(__A ) )]
for index in range(len(__A ) ):
num_transpositions[index].pop(__A )
return max(
int("".join(list(__A ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 1 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __magic_name__ ( UpperCamelCase__ ):
def _UpperCamelCase ( self : Union[str, Any] ) -> int:
UpperCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A , "tf_padding" ) )
self.parent.assertTrue(hasattr(__A , "depth_multiplier" ) )
class __magic_name__ :
def __init__( self : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int=1_3 , lowerCAmelCase__ : Union[str, Any]=3 , lowerCAmelCase__ : Tuple=3_2 , lowerCAmelCase__ : Union[str, Any]=0.25 , lowerCAmelCase__ : List[Any]=8 , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : List[Any]=1_0_2_4 , lowerCAmelCase__ : Dict=3_2 , lowerCAmelCase__ : Optional[int]="relu6" , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : Any=0.02 , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Optional[int]=1_0 , lowerCAmelCase__ : List[Any]=None , ) -> Optional[Any]:
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = num_channels
UpperCAmelCase = image_size
UpperCAmelCase = depth_multiplier
UpperCAmelCase = min_depth
UpperCAmelCase = tf_padding
UpperCAmelCase = int(last_hidden_size * depth_multiplier )
UpperCAmelCase = output_stride
UpperCAmelCase = hidden_act
UpperCAmelCase = classifier_dropout_prob
UpperCAmelCase = use_labels
UpperCAmelCase = is_training
UpperCAmelCase = num_labels
UpperCAmelCase = initializer_range
UpperCAmelCase = scope
def _UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def _UpperCamelCase ( self : Dict ) -> int:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict ) -> int:
UpperCAmelCase = MobileNetVaModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _UpperCamelCase ( self : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str ) -> str:
UpperCAmelCase = self.num_labels
UpperCAmelCase = MobileNetVaForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase = model(__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : int ) -> Tuple:
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
UpperCAmelCase = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def _UpperCamelCase ( self : Dict ) -> List[Any]:
UpperCAmelCase = MobileNetVaModelTester(self )
UpperCAmelCase = MobileNetVaConfigTester(self , config_class=__A , has_text_modality=__A )
def _UpperCamelCase ( self : List[str] ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
def _UpperCamelCase ( self : Dict ) -> Optional[int]:
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
def _UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
pass
@unittest.skip(reason="MobileNetV1 does not output attentions" )
def _UpperCamelCase ( self : Any ) -> int:
pass
def _UpperCamelCase ( self : List[Any] ) -> Dict:
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__A )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __A )
def _UpperCamelCase ( self : int ) -> Union[str, Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _UpperCamelCase ( self : Optional[Any] ) -> Tuple:
def check_hidden_states_output(lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] ):
UpperCAmelCase = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(__A , __A ) )
UpperCAmelCase = outputs.hidden_states
UpperCAmelCase = 2_6
self.assertEqual(len(__A ) , __A )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(__A , __A , __A )
def _UpperCamelCase ( self : Any ) -> int:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def _UpperCamelCase ( self : Any ) -> Union[str, Any]:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = MobileNetVaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def _lowerCAmelCase( ):
UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
def _UpperCamelCase ( self : Any ) -> Union[str, Any]:
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
)
@slow
def _UpperCamelCase ( self : Dict ) -> Optional[int]:
UpperCAmelCase = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(__A )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=__A , return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**__A )
# verify the logits
UpperCAmelCase = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , __A )
UpperCAmelCase = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1e-4 ) )
| 714 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = 50 # max width of layer names
lowerCAmelCase__ = 70 # max width of quantizer names
def _lowerCAmelCase( __A ):
UpperCAmelCase = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=__A , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=__A , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=__A , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=__A , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=__A , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=__A , type=__A , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=__A , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def _lowerCAmelCase( __A ):
if args.calibrator == "max":
UpperCAmelCase = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
UpperCAmelCase = "histogram"
elif args.calibrator == "mse":
UpperCAmelCase = "histogram"
else:
raise ValueError(F"Invalid calibrator {args.calibrator}" )
UpperCAmelCase = QuantDescriptor(num_bits=args.aprec , calib_method=__A )
UpperCAmelCase = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(__A )
quant_nn.QuantLinear.set_default_quant_desc_weight(__A )
def _lowerCAmelCase( __A , __A , __A=False , __A=False ):
logger.info("Configuring Model for Quantization" )
logger.info(F"using quantization package {pytorch_quantization.__file__}" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(__A , ["embeddings"] , which="weight" , _disabled=__A )
if args.quant_disable:
set_quantizer_by_name(__A , [""] , _disabled=__A )
if args.quant_disable_keyword:
set_quantizer_by_name(__A , args.quant_disable_keyword , _disabled=__A )
if args.quant_disable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=__A )
if args.quant_enable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=__A )
if args.recalibrate_weights:
recalibrate_weights(__A )
if args.fuse_qkv:
fuse_qkv(__A , __A )
if args.clip_gelu:
clip_gelu(__A , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(__A )
def _lowerCAmelCase( __A ):
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"{name:80}: {module}" )
def _lowerCAmelCase( __A , __A ):
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(__A )
def _lowerCAmelCase( __A , __A ):
def fusea(__A , __A , __A ):
for mod in [qq, qk, qv]:
if not hasattr(__A , "_amax" ):
print(" WARNING: NO AMAX BUFFER" )
return
UpperCAmelCase = qq._amax.detach().item()
UpperCAmelCase = qk._amax.detach().item()
UpperCAmelCase = qv._amax.detach().item()
UpperCAmelCase = max(__A , __A , __A )
qq._amax.fill_(__A )
qk._amax.fill_(__A )
qv._amax.fill_(__A )
logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(F"FUSE_QKV: {name:{name_width}}" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _lowerCAmelCase( __A , __A ):
for name, mod in model.named_modules():
if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=__A )
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
UpperCAmelCase = mod.weight.shape[0]
UpperCAmelCase = mod._weight_quantizer._amax.detach()
UpperCAmelCase = torch.ones(__A , dtype=amax.dtype , device=amax.device ) * amax
print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ):
            if not hasattr(mod._weight_quantizer , "_amax" ):
                print(F"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
UpperCAmelCase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
UpperCAmelCase = set(range(len(mod.weight.size() ) ) ) - axis_set
UpperCAmelCase = pytorch_quantization.utils.reduce_amax(mod.weight , axis=__A , keepdims=__A ).detach()
logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
UpperCAmelCase = amax
def _lowerCAmelCase( __A , __A=25 , __A=180 , __A=None ):
if ignore is None:
UpperCAmelCase = []
elif not isinstance(__A , __A ):
UpperCAmelCase = [ignore]
UpperCAmelCase = 0
for name, mod in model.named_modules():
if not hasattr(__A , "weight" ):
continue
UpperCAmelCase = max(__A , len(__A ) )
for name, mod in model.named_modules():
UpperCAmelCase = getattr(__A , "_input_quantizer" , __A )
UpperCAmelCase = getattr(__A , "_weight_quantizer" , __A )
if not hasattr(__A , "weight" ):
continue
if type(__A ) in ignore:
continue
if [True for s in ignore if type(__A ) is str and s in name]:
continue
UpperCAmelCase = F"Act:{input_q.extra_repr()}"
UpperCAmelCase = F"Wgt:{weight_q.extra_repr()}"
UpperCAmelCase = F"{name:{name_width}} {act_str} {wgt_str}"
if len(__A ) <= line_width:
logger.info(__A )
else:
logger.info(F"{name:{name_width}} {act_str}" )
logger.info(F"{' ':{name_width}} {wgt_str}" )
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
for name, mod in model.named_modules():
if isinstance(__A , pytorch_quantization.nn.TensorQuantizer ):
print(F"{name:80} {mod}" )
count += 1
print(F"{count} TensorQuantizers found in model" )
def _lowerCAmelCase( __A , __A , __A , __A , __A ):
UpperCAmelCase = getattr(__A , __A , __A )
if quantizer_mod is not None:
assert hasattr(__A , __A )
setattr(__A , __A , __A )
else:
logger.warning(F"{name} has no {quantizer}" )
def _lowerCAmelCase( __A , __A , __A="both" , **__A ):
UpperCAmelCase = F"Warning: changing {which} quantizers of {name:{qname_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
if which in ["input", "both"]:
set_quantizer(__A , __A , "_input_quantizer" , __A , __A )
if which in ["weight", "both"]:
set_quantizer(__A , __A , "_weight_quantizer" , __A , __A )
logger.info(__A )
def _lowerCAmelCase( __A , __A , **__A ):
for name, mod in model.named_modules():
if hasattr(__A , "_input_quantizer" ) or hasattr(__A , "_weight_quantizer" ):
for n in names:
if re.search(__A , __A ):
set_quantizers(__A , __A , **__A )
elif name.endswith("_quantizer" ):
for n in names:
if re.search(__A , __A ):
UpperCAmelCase = F"Warning: changing {name:{name_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
setattr(__A , __A , __A )
logger.info(__A )
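# Hedged usage sketch (an addition, for a hypothetical `model`): the keyword path
# above can be driven directly, e.g. disabling every quantizer whose module name
# matches "encoder", mirroring the --quant-disable-keyword flow:
#   set_quantizer_by_name(model, ["encoder"], _disabled=True)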
| 1 | 0 |
from cv2 import destroyAllWindows, imread, imshow, waitKey
def _lowerCAmelCase( __A ):
# getting number of pixels in the image
UpperCAmelCase , UpperCAmelCase = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
UpperCAmelCase = [255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
lowerCAmelCase__ = imread("image_data/lena.jpg", 1)
# convert to its negative
lowerCAmelCase__ = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
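# Hedged standalone check (an addition): the per-pixel operation above is just
# 255 - value on each channel, courtesy of NumPy broadcasting.
import numpy as np
_px = np.array([10, 200, 255], dtype=np.uint8)
assert (([255, 255, 255] - _px) == np.array([245, 55, 0])).all()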
| 715 |
def _lowerCAmelCase( __A ):
assert column_title.isupper()
UpperCAmelCase = 0
UpperCAmelCase = len(__A ) - 1
UpperCAmelCase = 0
while index >= 0:
UpperCAmelCase = (ord(column_title[index] ) - 64) * pow(26 , __A )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
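# Hedged standalone sketch (an addition; the loop above walks the title from the
# right with powers of 26): the same base-26 conversion scanning left-to-right,
# with digits "A".."Z" mapped to 1..26.
def _excel_title_to_column(title: str) -> int:
    result = 0
    for ch in title:
        result = result * 26 + (ord(ch) - 64)  # "A" -> 1, ..., "Z" -> 26
    return result

assert _excel_title_to_column("AB") == 28  # 1 * 26 + 2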
| 1 | 0 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
lowerCAmelCase__ = 1.0_54_57_18_17e-34 # unit of ℏ : J * s
lowerCAmelCase__ = 3e8 # unit of c : m * s^-1
def _lowerCAmelCase( __A , __A , __A ):
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
UpperCAmelCase = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
UpperCAmelCase = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
UpperCAmelCase = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
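# Hedged round-trip check (an addition): a force computed from a given area and
# distance should reproduce that area when plugged back into the area branch above.
from math import isclose, pi as _pi
_hbar, _c = 1.054571817e-34, 3e8
_area, _dist = 4.0, 1e-6
_force = (_hbar * _c * _pi**2 * _area) / (240 * _dist**4)
assert isclose((240 * _force * _dist**4) / (_hbar * _c * _pi**2), _area)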
| 716 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase__ = get_tests_dir("fixtures")
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase = mock.Mock()
UpperCAmelCase = 5_0_0
UpperCAmelCase = {}
UpperCAmelCase = HTTPError
UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=lowerCAmelCase__ ) as mock_head:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# This check we did call the fake head request
mock_head.assert_called()
def _UpperCamelCase ( self : List[Any] ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class __magic_name__ ( unittest.TestCase ):
@classmethod
def _UpperCamelCase ( cls : List[str] ) -> List[Any]:
UpperCAmelCase = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def _UpperCamelCase ( cls : Optional[int] ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def _UpperCamelCase ( self : Any ) -> Any:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase__ , repo_id="test-feature-extractor" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def _UpperCamelCase ( self : List[Any] ) -> Tuple:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase__ , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def _UpperCamelCase ( self : Dict ) -> List[str]:
CustomFeatureExtractor.register_for_auto_class()
UpperCAmelCase = CustomFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=lowerCAmelCase__ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 1 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """xlm-roberta"""
def __init__( self : List[Any] , lowerCAmelCase__ : List[str]=3_0_5_2_2 , lowerCAmelCase__ : Optional[Any]=7_6_8 , lowerCAmelCase__ : str=1_2 , lowerCAmelCase__ : Dict=1_2 , lowerCAmelCase__ : int=3_0_7_2 , lowerCAmelCase__ : Union[str, Any]="gelu" , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : List[Any]=5_1_2 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : Any=0.02 , lowerCAmelCase__ : Optional[int]=1e-1_2 , lowerCAmelCase__ : Union[str, Any]=1 , lowerCAmelCase__ : Dict=0 , lowerCAmelCase__ : Optional[Any]=2 , lowerCAmelCase__ : Dict="absolute" , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : List[str]=None , **lowerCAmelCase__ : Any , ) -> Dict:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = position_embedding_type
UpperCAmelCase = use_cache
UpperCAmelCase = classifier_dropout
class __magic_name__ ( _snake_case ):
@property
def _UpperCamelCase ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
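

# Usage sketch (not part of the original module; assumes the classes above are
# importable from an installed transformers tree): the `inputs` property drives
# the dynamic axes handed to the ONNX exporter.
#
#     onnx_config = XLMRobertaOnnxConfig(XLMRobertaConfig(), task="multiple-choice")
#     dict(onnx_config.inputs)
#     # -> {"input_ids": {0: "batch", 1: "choice", 2: "sequence"},
#     #     "attention_mask": {0: "batch", 1: "choice", 2: "sequence"}}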
| 717 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowerCAmelCase__ = "src/diffusers"
# Matches is_xxx_available()
lowerCAmelCase__ = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
lowerCAmelCase__ = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
lowerCAmelCase__ = "\n{0} = None\n"
lowerCAmelCase__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
lowerCAmelCase__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for a dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 1 | 0 |
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    # Element-wise max(0, x): negative entries are clamped to zero.
    return np.maximum(0, vector)
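

# Companion sketch (not in the original file): the ReLU subgradient that
# typically pairs with the forward pass above in backprop examples.
def relu_derivative(vector: list[float]) -> np.ndarray:
    # 1 for positive inputs, 0 otherwise (0 is chosen at the kink x = 0).
    return (np.asarray(vector) > 0).astype(float)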
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 718 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"
    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
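

# Usage sketch (not part of the original file): with all defaults, the config
# above generates five stage names, and since `out_features`/`out_indices` are
# both None the alignment helper is expected to fall back to the deepest stage.
#
#     config = ConvNextV2Config()
#     config.stage_names   # ["stem", "stage1", "stage2", "stage3", "stage4"]
#     config.out_features  # ["stage4"]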
| 1 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
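

# Illustration (simplified sketch, not transformers code) of the lazy-import
# idea _LazyModule implements for the structure above, via module-level
# __getattr__ (PEP 562): a submodule is only imported when a symbol is touched.
#
#     import importlib
#
#     _lazy_map = {"PoolFormerConfig": "configuration_poolformer"}
#
#     def __getattr__(name):
#         if name in _lazy_map:
#             return getattr(importlib.import_module("." + _lazy_map[name], __name__), name)
#         raise AttributeError(name)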
| 719 |
lowerCAmelCase__ = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCAmelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCAmelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 1 | 0 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def _UpperCamelCase ( self : int ) -> Tuple:
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_decoder()
UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _a )
def _UpperCamelCase ( self : Optional[Any] ) -> int:
UpperCAmelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(_a , "include" ):
WavaVecaProcessorWithLM(
tokenizer=_a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _UpperCamelCase ( self : List[Any] ) -> int:
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_decoder()
UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
UpperCAmelCase = floats_list((3, 1_0_0_0) )
UpperCAmelCase = feature_extractor(_a , return_tensors="np" )
UpperCAmelCase = processor(_a , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _UpperCamelCase ( self : List[str] ) -> List[Any]:
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_decoder()
UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
UpperCAmelCase = "This is a test string"
UpperCAmelCase = processor(text=_a )
UpperCAmelCase = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_decoder()
UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
UpperCAmelCase = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
UpperCAmelCase = processor.decode(_a )
UpperCAmelCase = decoder.decode_beams(_a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def _UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : List[Any] ) -> int:
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_decoder()
UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
UpperCAmelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
UpperCAmelCase = processor.batch_decode(_a )
else:
with get_context(_a ).Pool() as pool:
UpperCAmelCase = processor.batch_decode(_a , _a )
UpperCAmelCase = list(_a )
with get_context("fork" ).Pool() as p:
UpperCAmelCase = decoder.decode_beams_batch(_a , _a )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_a , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(_a , decoded_processor.logit_score )
self.assertListEqual(_a , decoded_processor.lm_score )
def _UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_decoder()
UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
UpperCAmelCase = self._get_dummy_logits()
UpperCAmelCase = 1_5
UpperCAmelCase = -20.0
UpperCAmelCase = -4.0
UpperCAmelCase = processor.batch_decode(
_a , beam_width=_a , beam_prune_logp=_a , token_min_logp=_a , )
UpperCAmelCase = decoded_processor_out.text
UpperCAmelCase = list(_a )
with get_context("fork" ).Pool() as pool:
UpperCAmelCase = decoder.decode_beams_batch(
_a , _a , beam_width=_a , beam_prune_logp=_a , token_min_logp=_a , )
UpperCAmelCase = [d[0][0] for d in decoded_decoder_out]
UpperCAmelCase = [d[0][2] for d in decoded_decoder_out]
UpperCAmelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_a , _a )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , _a )
self.assertTrue(np.array_equal(_a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _a , atol=1e-3 ) )
self.assertTrue(np.array_equal(_a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , _a , atol=1e-3 ) )
def _UpperCamelCase ( self : Optional[int] ) -> Any:
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_decoder()
UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
UpperCAmelCase = self._get_dummy_logits()
UpperCAmelCase = 2.0
UpperCAmelCase = 5.0
UpperCAmelCase = -20.0
UpperCAmelCase = True
UpperCAmelCase = processor.batch_decode(
_a , alpha=_a , beta=_a , unk_score_offset=_a , lm_score_boundary=_a , )
UpperCAmelCase = decoded_processor_out.text
UpperCAmelCase = list(_a )
decoder.reset_params(
alpha=_a , beta=_a , unk_score_offset=_a , lm_score_boundary=_a , )
with get_context("fork" ).Pool() as pool:
UpperCAmelCase = decoder.decode_beams_batch(
_a , _a , )
UpperCAmelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_a , _a )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , _a )
UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _a )
def _UpperCamelCase ( self : str ) -> int:
UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
UpperCAmelCase = os.listdir(_a )
UpperCAmelCase = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_a , _a )
def _UpperCamelCase ( self : Dict ) -> Tuple:
UpperCAmelCase = snapshot_download("hf-internal-testing/processor_with_lm" )
UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(_a )
UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
UpperCAmelCase = os.listdir(_a )
UpperCAmelCase = os.listdir(_a )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_a , _a )
def _UpperCamelCase ( self : str ) -> Optional[Any]:
UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
UpperCAmelCase = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
UpperCAmelCase = floats_list((3, 1_0_0_0) )
UpperCAmelCase = processor_wavaveca(_a , return_tensors="np" )
UpperCAmelCase = processor_auto(_a , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
UpperCAmelCase = self._get_dummy_logits()
UpperCAmelCase = processor_wavaveca.batch_decode(_a )
UpperCAmelCase = processor_auto.batch_decode(_a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def _UpperCamelCase ( self : List[str] ) -> Optional[int]:
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_decoder()
UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def _UpperCamelCase ( self : Tuple ) -> List[str]:
UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
UpperCAmelCase = self._get_dummy_logits()[0]
UpperCAmelCase = processor.decode(_a , output_word_offsets=_a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(_a , _a ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
UpperCAmelCase = self._get_dummy_logits()
UpperCAmelCase = processor.batch_decode(_a , output_word_offsets=_a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(_a , _a ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(_a , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _UpperCamelCase ( self : int ) -> List[str]:
import torch
UpperCAmelCase = load_dataset("common_voice" , "en" , split="train" , streaming=_a )
UpperCAmelCase = ds.cast_column("audio" , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
UpperCAmelCase = iter(_a )
UpperCAmelCase = next(_a )
UpperCAmelCase = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
UpperCAmelCase = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
UpperCAmelCase = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
UpperCAmelCase = model(_a ).logits.cpu().numpy()
UpperCAmelCase = processor.decode(logits[0] , output_word_offsets=_a )
UpperCAmelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
UpperCAmelCase = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
UpperCAmelCase = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(_a , "word" ) ) , _a )
self.assertEqual(" ".join(self.get_from_offsets(_a , "word" ) ) , output.text )
# output times
UpperCAmelCase = torch.tensor(self.get_from_offsets(_a , "start_time" ) )
UpperCAmelCase = torch.tensor(self.get_from_offsets(_a , "end_time" ) )
# fmt: off
UpperCAmelCase = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
UpperCAmelCase = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_a , _a , atol=0.01 ) )
self.assertTrue(torch.allclose(_a , _a , atol=0.01 ) )
| 720 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1_005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNetaDConditionModel(**model_kwargs)
return model
@property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1_000, beta_schedule="linear", beta_start=0.00_085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        mask = np.ones((768, 768), dtype=np.float32)
        # mask the region above the cat's head (slice reconstructed; the exact bounds are an assumption)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 )
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        output = pipeline(
            prompt, image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np", )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 1 | 0 |
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
| 721 |
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
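

# Cross-check sketch (not in the original file): the divide-and-conquer answer
# should agree with a brute-force O(n^2) scan over all pairs.
def brute_force_closest_pair(points):
    return (
        min(euclidean_distance_sqr(p, q) for i, p in enumerate(points) for q in points[i + 1 :]) ** 0.5
    )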
if __name__ == "__main__":
lowerCAmelCase__ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 1 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
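

# Shape note (not part of the script): the fused qkv tensors popped above have
# shape (3 * hidden_size, hidden_size); the three equal slices taken are
#     query = in_proj_weight[:hidden_size]
#     key   = in_proj_weight[hidden_size : 2 * hidden_size]
#     value = in_proj_weight[-hidden_size:]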
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 700 |
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5_000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 1 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
if config is None:
            assert isinstance(self.model, PreTrainedModel), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f" {self.model.__class__}"
)
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
" padding.." )
if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs, )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps)
        return scheduler
    def _get_train_sampler(self):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None):
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"], attention_mask=inputs["attention_mask"], **gen_kwargs, )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined, at least the EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}" )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
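

# Sketch (an assumption, in the fairseq style, mirroring the
# `utils.label_smoothed_nll_loss` imported dynamically in __init__ above) of
# label-smoothed NLL, so the smoothing branch of `_compute_loss` is concrete.
def _label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        nll_loss.masked_fill_(pad_mask, 0.0)
        smooth_loss.masked_fill_(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss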
| 701 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _snake_case , unittest.TestCase ):
UpperCAmelCase = LEDTokenizer
UpperCAmelCase = LEDTokenizerFast
UpperCAmelCase = True
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
super().setUp()
UpperCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCAmelCase = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
UpperCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCAmelCase = {"unk_token": "<unk>"}
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def _UpperCamelCase ( self : Union[str, Any] , **lowerCAmelCase__ : Optional[int] ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _UpperCamelCase ( self : str , **lowerCAmelCase__ : str ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _UpperCamelCase ( self : List[str] , lowerCAmelCase__ : List[Any] ) -> List[Any]:
return "lower newer", "lower newer"
@cached_property
def _UpperCamelCase ( self : Dict ) -> str:
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def _UpperCamelCase ( self : int ) -> Tuple:
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def _UpperCamelCase ( self : Tuple ) -> List[str]:
UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCAmelCase = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , max_length=len(lowerCAmelCase__ ) , padding=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@require_torch
def _UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="pt" )
self.assertIn("input_ids" , lowerCAmelCase__ )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertNotIn("labels" , lowerCAmelCase__ )
self.assertNotIn("decoder_attention_mask" , lowerCAmelCase__ )
@require_torch
def _UpperCamelCase ( self : int ) -> int:
UpperCAmelCase = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(text_target=lowerCAmelCase__ , max_length=3_2 , padding="max_length" , return_tensors="pt" )
self.assertEqual(3_2 , targets["input_ids"].shape[1] )
@require_torch
def _UpperCamelCase ( self : Any ) -> int:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(
["I am a small frog" * 1_0_2_4, "I am a small frog"] , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def _UpperCamelCase ( self : Dict ) -> Tuple:
UpperCAmelCase = ["A long paragraph for summarization."]
UpperCAmelCase = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase = tokenizer(text_target=lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase = inputs["input_ids"]
UpperCAmelCase = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def _UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = ["Summary of the text.", "Another summary."]
UpperCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
UpperCAmelCase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ )
UpperCAmelCase = [[0] * len(lowerCAmelCase__ ) for x in encoded_output["input_ids"]]
UpperCAmelCase = tokenizer.pad(lowerCAmelCase__ )
self.assertSequenceEqual(outputs["global_attention_mask"] , lowerCAmelCase__ )
def _UpperCamelCase ( self : List[str] ) -> int:
pass
def _UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = "A, <mask> AllenNLP sentence."
UpperCAmelCase = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
UpperCAmelCase = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 1 | 0 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __magic_name__ ( unittest.TestCase , _snake_case ):
def _UpperCamelCase ( self : int ) -> Optional[Any]:
UpperCAmelCase = load_tool("text-to-speech" )
self.tool.setup()
def _UpperCamelCase ( self : Tuple ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase = self.tool("hey" )
UpperCAmelCase = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
def _UpperCamelCase ( self : Optional[int] ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase = self.tool("hey" )
UpperCAmelCase = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 702 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase__ = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def _UpperCamelCase ( self : int ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : List[Any] ) -> Dict:
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=0.9 , lowerCAmelCase__ : Tuple=3 , lowerCAmelCase__ : Optional[int]=0.5 ) -> Any:
if NLTK_VERSION >= version.Version("3.6.5" ):
UpperCAmelCase = [
meteor_score.single_meteor_score(
word_tokenize(lowerCAmelCase__ ) , word_tokenize(lowerCAmelCase__ ) , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
else:
UpperCAmelCase = [
meteor_score.single_meteor_score(lowerCAmelCase__ , lowerCAmelCase__ , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
return {"meteor": np.mean(lowerCAmelCase__ )}
| 1 | 0 |
def _lowerCAmelCase( __A , __A ):
UpperCAmelCase = word.split()
def justify(__A , __A , __A ) -> str:
UpperCAmelCase = max_width - width
UpperCAmelCase = len(__lowercase )
if len(__lowercase ) == 1:
            # if there is only one word on the line,
            # pad the remainder of the line with overall_spaces_count spaces
return line[0] + " " * overall_spaces_count
else:
UpperCAmelCase = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
UpperCAmelCase = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
UpperCAmelCase = (
overall_spaces_count % spaces_to_insert_between_words
)
            # distribute the leftover spaces round-robin, starting from the leftmost gaps
for i in range(__lowercase ):
num_spaces_between_words_list[i] += 1
UpperCAmelCase = []
for i in range(__lowercase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * " " )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(__lowercase )
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = 0
for word in words:
if width + len(__lowercase ) + len(__lowercase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(__lowercase )
width += len(__lowercase )
else:
# justify the line and add it to result
answer.append(justify(__lowercase , __lowercase , __lowercase ) )
# reset new line and new width
UpperCAmelCase = [word], len(__lowercase )
UpperCAmelCase = max_width - width - len(__lowercase )
answer.append(" ".join(__lowercase ) + (remaining_spaces + 1) * " " )
return answer
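# Illustrative aside (not part of the original file): the obfuscation above
# hides the algorithm, so here is a compact runnable restatement of the same
# greedy fill with round-robin space distribution. All names are hypothetical.
def _justify_demo(text: str, max_width: int) -> list:
    def fill(line, width):
        if len(line) == 1:  # single word: pad on the right
            return line[0] + " " * (max_width - width)
        base, extra = divmod(max_width - width, len(line) - 1)
        pieces = []
        for i, w in enumerate(line[:-1]):
            pieces.append(w + " " * (base + (1 if i < extra else 0)))
        pieces.append(line[-1])
        return "".join(pieces)

    answer, line, width = [], [], 0
    for w in text.split():
        if width + len(w) + len(line) <= max_width:  # len(line) counts the minimum gaps
            line.append(w)
            width += len(w)
        else:
            answer.append(fill(line, width))
            line, width = [w], len(w)
    answer.append(" ".join(line).ljust(max_width))  # last line stays left-justified
    return answer

assert _justify_demo("This is an example of text justification.", 16) == [
    "This    is    an",
    "example  of text",
    "justification.  ",
]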
if __name__ == "__main__":
from doctest import testmod
testmod()
| 703 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """lxmert"""
UpperCAmelCase = {}
def __init__( self : int , lowerCAmelCase__ : Any=3_0_5_2_2 , lowerCAmelCase__ : List[str]=7_6_8 , lowerCAmelCase__ : Union[str, Any]=1_2 , lowerCAmelCase__ : List[Any]=9_5_0_0 , lowerCAmelCase__ : Any=1_6_0_0 , lowerCAmelCase__ : Union[str, Any]=4_0_0 , lowerCAmelCase__ : Tuple=3_0_7_2 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : int=5_1_2 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : List[str]=0.02 , lowerCAmelCase__ : str=1e-1_2 , lowerCAmelCase__ : str=9 , lowerCAmelCase__ : int=5 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : List[Any]=2_0_4_8 , lowerCAmelCase__ : Any=4 , lowerCAmelCase__ : Dict=6.67 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Tuple=True , **lowerCAmelCase__ : List[Any] , ) -> Dict:
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = num_qa_labels
UpperCAmelCase = num_object_labels
UpperCAmelCase = num_attr_labels
UpperCAmelCase = l_layers
UpperCAmelCase = x_layers
UpperCAmelCase = r_layers
UpperCAmelCase = visual_feat_dim
UpperCAmelCase = visual_pos_dim
UpperCAmelCase = visual_loss_normalizer
UpperCAmelCase = task_matched
UpperCAmelCase = task_mask_lm
UpperCAmelCase = task_obj_predict
UpperCAmelCase = task_qa
UpperCAmelCase = visual_obj_loss
UpperCAmelCase = visual_attr_loss
UpperCAmelCase = visual_feat_loss
UpperCAmelCase = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**lowerCAmelCase__ )
| 1 | 0 |
from __future__ import annotations
def _lowerCAmelCase( __A ):
UpperCAmelCase = 2
UpperCAmelCase = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(__snake_case )
if n > 1:
factors.append(__snake_case )
return factors
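# Illustrative aside (not part of the original file): a readable restatement
# of the trial-division factorization above. In the de-obfuscated algorithm
# the `__snake_case` references stand in for the divisor `i` inside the loop
# and for the leftover `n` after it. Names here are hypothetical.
def _prime_factors_demo(n: int) -> list:
    i, factors = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:  # whatever remains after the loop is itself prime
        factors.append(n)
    return factors

assert _prime_factors_demo(360) == [2, 2, 2, 3, 3, 5]
assert _prime_factors_demo(97) == [97]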
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704 |
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _lowerCAmelCase( __A = 100 ):
UpperCAmelCase = 1
UpperCAmelCase = 2
for i in range(2 , max_n + 1 ):
UpperCAmelCase = pre_numerator
UpperCAmelCase = 2 * i // 3 if i % 3 == 0 else 1
UpperCAmelCase = cur_numerator
UpperCAmelCase = e_cont * pre_numerator + temp
return sum_digits(__A )
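# Illustrative cross-check (not part of the solution file): the coefficient
# rule `2 * i // 3 if i % 3 == 0 else 1` above encodes the continued fraction
# of e, [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...]. Building the convergent
# explicitly with fractions.Fraction reproduces the known 10th convergent,
# 1457/536 (digit sum of the numerator: 17).
from fractions import Fraction

def _e_convergent_numerator_demo(n: int) -> int:
    coeffs = [2] + [2 * (k // 3 + 1) if k % 3 == 1 else 1 for k in range(n - 1)]
    value = Fraction(coeffs[-1])
    for a in reversed(coeffs[:-1]):
        value = a + 1 / value  # fold the continued fraction from the back
    return value.numerator

assert _e_convergent_numerator_demo(10) == 1457
assert sum(int(d) for d in str(_e_convergent_numerator_demo(10))) == 17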
if __name__ == "__main__":
print(f"{solution() = }")
| 1 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCAmelCase__ = logging.get_logger(__name__)
class __magic_name__ ( __a ):
UpperCAmelCase = ['''pixel_values''']
def __init__( self : Dict , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[int, float] = 1 / 2_5_5 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : int , ) -> Tuple:
super().__init__(**snake_case__ )
UpperCAmelCase = size if size is not None else {"shortest_edge": 2_2_4}
UpperCAmelCase = get_size_dict(snake_case__ , default_to_square=snake_case__ )
UpperCAmelCase = crop_size if crop_size is not None else {"height": 2_5_6, "width": 2_5_6}
UpperCAmelCase = get_size_dict(snake_case__ , param_name="crop_size" )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = resample
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = do_flip_channel_order
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PIL.Image.BILINEAR , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : List[Any] , ) -> Optional[Any]:
UpperCAmelCase = get_size_dict(snake_case__ , default_to_square=snake_case__ )
if "shortest_edge" not in size:
raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}" )
UpperCAmelCase = get_resize_output_image_size(snake_case__ , size=size["shortest_edge"] , default_to_square=snake_case__ )
return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : int , ) -> List[Any]:
UpperCAmelCase = get_size_dict(snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(snake_case__ , size=(size["height"], size["width"]) , data_format=snake_case__ , **snake_case__ )
def _UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[int, float] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Dict , ) -> Optional[int]:
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None ) -> Dict:
return flip_channel_order(snake_case__ , data_format=snake_case__ )
def _UpperCamelCase ( self : Tuple , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : float = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase__ : Dict , ) -> Tuple:
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(snake_case__ , default_to_square=snake_case__ )
UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase = get_size_dict(snake_case__ , param_name="crop_size" )
UpperCAmelCase = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
UpperCAmelCase = [to_numpy_array(snake_case__ ) for image in images]
if do_resize:
UpperCAmelCase = [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images]
if do_center_crop:
UpperCAmelCase = [self.center_crop(image=snake_case__ , size=snake_case__ ) for image in images]
if do_rescale:
UpperCAmelCase = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
UpperCAmelCase = [self.flip_channel_order(image=snake_case__ ) for image in images]
UpperCAmelCase = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
UpperCAmelCase = {"pixel_values": images}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
def _UpperCamelCase ( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Tuple] = None ) -> Any:
UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(snake_case__ ) != len(snake_case__ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(snake_case__ ):
UpperCAmelCase = target_sizes.numpy()
UpperCAmelCase = []
for idx in range(len(snake_case__ ) ):
UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=snake_case__ )
UpperCAmelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(snake_case__ )
else:
UpperCAmelCase = logits.argmax(dim=1 )
UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
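# Illustrative aside (not part of the original file): the
# `do_flip_channel_order` step exists because the pretrained checkpoints
# expect BGR input (see the comment in the preprocessing method above). The
# flip itself is just a channel-axis reversal, shown here in simplified
# channels-last form:
_img_demo = np.arange(12).reshape(2, 2, 3)   # (H, W, C), C ordered as RGB
_bgr_demo = _img_demo[..., ::-1]             # reverse the channel axis: RGB -> BGR
assert (_bgr_demo[..., 0] == _img_demo[..., 2]).all()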
| 705 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 1 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __magic_name__ ( __lowerCamelCase ):
UpperCAmelCase = ['''image_processor''', '''tokenizer''']
UpperCAmelCase = '''LayoutLMv3ImageProcessor'''
UpperCAmelCase = ('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''')
def __init__( self : int , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Dict=None , **lowerCAmelCase__ : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a_ , )
UpperCAmelCase = kwargs.pop("feature_extractor" )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a_ , a_ )
def __call__( self : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any] = None , lowerCAmelCase__ : List[Any] = None , lowerCAmelCase__ : Union[str, Any] = None , lowerCAmelCase__ : int = None , lowerCAmelCase__ : List[Any] = True , lowerCAmelCase__ : Optional[Any] = False , lowerCAmelCase__ : List[str] = None , lowerCAmelCase__ : Union[str, Any] = None , lowerCAmelCase__ : Tuple = 0 , lowerCAmelCase__ : Dict = None , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[Any] = None , lowerCAmelCase__ : int = False , lowerCAmelCase__ : int = False , lowerCAmelCase__ : Dict = False , lowerCAmelCase__ : Union[str, Any] = False , lowerCAmelCase__ : int = True , lowerCAmelCase__ : str = None , **lowerCAmelCase__ : str , ) -> List[str]:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
# first, apply the image processor
UpperCAmelCase = self.image_processor(images=a_ , return_tensors=a_ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(a_ , a_ ):
UpperCAmelCase = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase = features["words"]
UpperCAmelCase = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , stride=a_ , pad_to_multiple_of=a_ , return_token_type_ids=a_ , return_attention_mask=a_ , return_overflowing_tokens=a_ , return_special_tokens_mask=a_ , return_offsets_mapping=a_ , return_length=a_ , verbose=a_ , return_tensors=a_ , **a_ , )
# add pixel values
UpperCAmelCase = features.pop("pixel_values" )
if return_overflowing_tokens is True:
UpperCAmelCase = self.get_overflowing_images(a_ , encoded_inputs["overflow_to_sample_mapping"] )
UpperCAmelCase = images
return encoded_inputs
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any ) -> Tuple:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCAmelCase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(a_ ) != len(a_ ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
f" {len(a_ )} and {len(a_ )}" )
return images_with_overflow
def _UpperCamelCase ( self : Union[str, Any] , *lowerCAmelCase__ : Any , **lowerCAmelCase__ : List[Any] ) -> int:
return self.tokenizer.batch_decode(*a_ , **a_ )
def _UpperCamelCase ( self : Optional[int] , *lowerCAmelCase__ : str , **lowerCAmelCase__ : int ) -> Optional[int]:
return self.tokenizer.decode(*a_ , **a_ )
@property
def _UpperCamelCase ( self : List[Any] ) -> Dict:
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _UpperCamelCase ( self : Tuple ) -> Optional[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a_ , )
return self.image_processor_class
@property
def _UpperCamelCase ( self : int ) -> List[Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a_ , )
return self.image_processor
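# Illustrative usage sketch (not part of the original file). The model id and
# its Hub availability are assumptions, and this needs Pillow plus network
# access. With apply_ocr=False you supply the words and 0-1000 normalized
# boxes yourself, which is exactly the code path the checks above guard.
from PIL import Image
from transformers import AutoProcessor

_processor_demo = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
_image_demo = Image.new("RGB", (224, 224), "white")   # stand-in document image
_words_demo = ["hello", "world"]
_boxes_demo = [[1, 2, 3, 4], [5, 6, 7, 8]]
_encoding_demo = _processor_demo(_image_demo, _words_demo, boxes=_boxes_demo, return_tensors="pt")
# sorted(_encoding_demo.keys()) -> ['attention_mask', 'bbox', 'input_ids', 'pixel_values'],
# matching the model input names property above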
| 706 |
import numpy
# List of input, output pairs
lowerCAmelCase__ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
lowerCAmelCase__ = (((515, 22, 13), 555), ((61, 35, 49), 150))
lowerCAmelCase__ = [2, 4, 1, 5]
lowerCAmelCase__ = len(train_data)
lowerCAmelCase__ = 0.0_0_9
def _lowerCAmelCase( __A , __A="train" ):
return calculate_hypothesis_value(__A , __A ) - output(
__A , __A )
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
for i in range(len(__A ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _lowerCAmelCase( __A , __A ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _lowerCAmelCase( __A , __A ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _lowerCAmelCase( __A , __A=m ):
UpperCAmelCase = 0
for i in range(__A ):
if index == -1:
summation_value += _error(__A )
else:
summation_value += _error(__A ) * train_data[i][0][index]
return summation_value
def _lowerCAmelCase( __A ):
UpperCAmelCase = summation_of_cost_derivative(__A , __A ) / m
return cost_derivative_value
def _lowerCAmelCase( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
UpperCAmelCase = 0.000002
UpperCAmelCase = 0
UpperCAmelCase = 0
while True:
j += 1
UpperCAmelCase = [0, 0, 0, 0]
for i in range(0 , len(__A ) ):
UpperCAmelCase = get_cost_derivative(i - 1 )
UpperCAmelCase = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
__A , __A , atol=__A , rtol=__A , ):
break
UpperCAmelCase = temp_parameter_vector
print(("Number of iterations:", j) )
def _lowerCAmelCase( ):
for i in range(len(__A ) ):
print(("Actual output value:", output(__A , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(__A , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 1 | 0 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class __magic_name__ ( _snake_case ):
UpperCAmelCase = ComputeEnvironment.AMAZON_SAGEMAKER
UpperCAmelCase = True
UpperCAmelCase = """ml.p3.2xlarge"""
UpperCAmelCase = """accelerate_sagemaker_execution_role"""
UpperCAmelCase = """hf-sm"""
UpperCAmelCase = """us-east-1"""
UpperCAmelCase = 1
UpperCAmelCase = """accelerate-sagemaker-1"""
UpperCAmelCase = """1.6"""
UpperCAmelCase = """4.4"""
UpperCAmelCase = """train.py"""
UpperCAmelCase = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""False""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
UpperCAmelCase = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""--do_test""",
"""False""",
"""--do_predict""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : Any ) -> Tuple:
# If no defaults are changed, `to_kwargs` returns an empty dict.
UpperCAmelCase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args["model_name_or_path"] , lowerCAmelCase__ )
assert isinstance(converted_args["do_train"] , lowerCAmelCase__ )
assert isinstance(converted_args["epochs"] , lowerCAmelCase__ )
assert isinstance(converted_args["learning_rate"] , lowerCAmelCase__ )
assert isinstance(converted_args["max_steps"] , lowerCAmelCase__ )
with pytest.raises(lowerCAmelCase__ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 707 |
def _lowerCAmelCase( __A , __A , __A ):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(__A , n - 1 , __A ) * a) % mod
else:
UpperCAmelCase = binary_exponentiation(__A , n / 2 , __A )
return (b * b) % mod
# a prime number
lowerCAmelCase__ = 701
lowerCAmelCase__ = 1000000000
lowerCAmelCase__ = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
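# Illustrative aside (not part of the original file): the equivalent
# iterative square-and-multiply, which avoids recursion while keeping
# O(log n) multiplications. The assert repeats the Fermat-inverse idea
# above, cross-checked against Python's built-in three-argument pow.
def _power_mod_demo(base: int, exponent: int, modulus: int) -> int:
    result = 1
    base %= modulus
    while exponent > 0:
        if exponent & 1:  # fold in the current square for each set bit of the exponent
            result = result * base % modulus
        base = base * base % modulus
        exponent >>= 1
    return result

assert _power_mod_demo(10, 701 - 2, 701) == pow(10, 701 - 2, 701)  # modular inverse of 10 mod 701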
| 1 | 0 |
import json
import sys
def _lowerCAmelCase( __A : Any , __A : Any ):
with open(_A , encoding="utf-8" ) as f:
UpperCAmelCase = json.load(_A )
UpperCAmelCase = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
for benchmark_name in sorted(_A ):
UpperCAmelCase = results[benchmark_name]
UpperCAmelCase = benchmark_name.split("/" )[-1]
output_md.append(F"### Benchmark: {benchmark_file_name}" )
UpperCAmelCase = "| metric |"
UpperCAmelCase = "|--------|"
UpperCAmelCase = "| new / old (diff) |"
for metric_name in sorted(_A ):
UpperCAmelCase = benchmark_res[metric_name]
UpperCAmelCase = metric_vals["new"]
UpperCAmelCase = metric_vals.get("old" , _A )
UpperCAmelCase = metric_vals.get("diff" , _A )
UpperCAmelCase = F" {new_val:f}" if isinstance(_A , (int, float) ) else "None"
if old_val is not None:
val_str += F" / {old_val:f}" if isinstance(_A , (int, float) ) else "None"
if dif_val is not None:
val_str += F" ({dif_val:f})" if isinstance(_A , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("</details>" )
with open(_A , "w" , encoding="utf-8" ) as f:
f.writelines("\n".join(_A ) )
if __name__ == "__main__":
lowerCAmelCase__ = sys.argv[1]
lowerCAmelCase__ = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 708 |
lowerCAmelCase__ = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
lowerCAmelCase__ = {value: key for key, value in encode_dict.items()}
def _lowerCAmelCase( __A ):
UpperCAmelCase = ""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("encode() accepts only letters of the alphabet and spaces" )
return encoded
def _lowerCAmelCase( __A ):
if set(__A ) - {"A", "B", " "} != set():
raise Exception("decode() accepts only 'A', 'B' and spaces" )
UpperCAmelCase = ""
for word in coded.split():
while len(__A ) != 0:
decoded += decode_dict[word[:5]]
UpperCAmelCase = word[5:]
decoded += " "
return decoded.strip()
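# Illustrative round trip (not part of the original file). Both functions
# above are named `_lowerCAmelCase`, so the decoder definition shadows the
# encoder; `encode`/`decode` below are hypothetical names for the two bodies.
#
#   encode("hello") == "AABBBAABAAABABAABABAABBAB"   # h e l l o, five chars each
#   decode("AABBBAABAAABABAABABAABBAB") == "hello"
#
# Note the table is not the classic Baconian alphabet, where i/j and u/v
# share codes: here "j" maps to "BBBAA" and "v" to "BBBAB" independently.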
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | 0 |
lowerCAmelCase__ = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCAmelCase__ = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCAmelCase__ = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 709 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase__ = {"UserAgent": UserAgent().random}
def _lowerCAmelCase( __A ):
UpperCAmelCase = script.contents[0]
UpperCAmelCase = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __magic_name__ :
def __init__( self : Optional[Any] , lowerCAmelCase__ : Optional[int] ) -> Any:
UpperCAmelCase = f"https://www.instagram.com/{username}/"
UpperCAmelCase = self.get_json()
def _UpperCamelCase ( self : List[str] ) -> dict:
UpperCAmelCase = requests.get(self.url , headers=lowerCAmelCase__ ).text
UpperCAmelCase = BeautifulSoup(lowerCAmelCase__ , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Tuple ) -> str:
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self : Optional[int] ) -> str:
return f"{self.fullname} ({self.username}) is {self.biography}"
@property
def _UpperCamelCase ( self : Any ) -> str:
return self.user_data["username"]
@property
def _UpperCamelCase ( self : List[Any] ) -> str:
return self.user_data["full_name"]
@property
def _UpperCamelCase ( self : List[str] ) -> str:
return self.user_data["biography"]
@property
def _UpperCamelCase ( self : Optional[int] ) -> str:
return self.user_data["business_email"]
@property
def _UpperCamelCase ( self : str ) -> str:
return self.user_data["external_url"]
@property
def _UpperCamelCase ( self : int ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def _UpperCamelCase ( self : List[Any] ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def _UpperCamelCase ( self : List[str] ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _UpperCamelCase ( self : Tuple ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def _UpperCamelCase ( self : Optional[int] ) -> bool:
return self.user_data["is_verified"]
@property
def _UpperCamelCase ( self : Optional[Any] ) -> bool:
return self.user_data["is_private"]
def _lowerCAmelCase( __A = "github" ):
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
UpperCAmelCase = InstagramUser(__A )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __A )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = InstagramUser("github")
print(instagram_user)
print(f"{instagram_user.number_of_posts = }")
print(f"{instagram_user.number_of_followers = }")
print(f"{instagram_user.number_of_followings = }")
print(f"{instagram_user.email = }")
print(f"{instagram_user.website = }")
print(f"{instagram_user.profile_picture_url = }")
print(f"{instagram_user.is_verified = }")
print(f"{instagram_user.is_private = }")
| 1 | 0 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def _lowerCAmelCase( __A ):
UpperCAmelCase = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(__A , __A )
def _lowerCAmelCase( __A ):
UpperCAmelCase = emb.weight.shape
UpperCAmelCase = nn.Linear(__A , __A , bias=__A )
UpperCAmelCase = emb.weight.data
return lin_layer
def _lowerCAmelCase( __A ):
UpperCAmelCase = torch.load(__A , map_location="cpu" )
UpperCAmelCase = mam_aaa['args'] or mam_aaa['cfg']['model']
UpperCAmelCase = mam_aaa['model']
remove_ignore_keys_(__A )
UpperCAmelCase = state_dict['encoder.embed_tokens.weight'].shape[0]
UpperCAmelCase = MaMaaaConfig(
vocab_size=__A , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
UpperCAmelCase = state_dict['decoder.embed_tokens.weight']
UpperCAmelCase = MaMaaaForConditionalGeneration(__A )
model.model.load_state_dict(__A , strict=__A )
UpperCAmelCase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 710 |
import unittest
import numpy as np
def _lowerCAmelCase( __A , __A , __A , __A = None , ):
UpperCAmelCase = np.shape(__A )
UpperCAmelCase = np.shape(__A )
UpperCAmelCase = np.shape(__A )
if shape_a[0] != shape_b[0]:
UpperCAmelCase = (
"Expected the same number of rows for A and B. "
F"Instead found A of size {shape_a} and B of size {shape_b}"
)
raise ValueError(__A )
if shape_b[1] != shape_c[1]:
UpperCAmelCase = (
"Expected the same number of columns for B and C. "
F"Instead found B of size {shape_b} and C of size {shape_c}"
)
raise ValueError(__A )
UpperCAmelCase = pseudo_inv
if a_inv is None:
try:
UpperCAmelCase = np.linalg.inv(__A )
except np.linalg.LinAlgError:
raise ValueError(
"Input matrix A is not invertible. Cannot compute Schur complement." )
return mat_c - mat_b.T @ a_inv @ mat_b
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : List[str] ) -> None:
UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase = np.array([[2, 1], [6, 3]] )
UpperCAmelCase = schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = np.block([[a, b], [b.T, c]] )
UpperCAmelCase = np.linalg.det(lowerCAmelCase__ )
UpperCAmelCase = np.linalg.det(lowerCAmelCase__ )
UpperCAmelCase = np.linalg.det(lowerCAmelCase__ )
self.assertAlmostEqual(lowerCAmelCase__ , det_a * det_s )
def _UpperCamelCase ( self : str ) -> None:
UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowerCAmelCase__ ):
schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : Dict ) -> None:
UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowerCAmelCase__ ):
schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
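# Illustrative aside (not part of the test file): the identity these tests
# exercise is det([[A, B], [B^T, C]]) = det(A) * det(C - B^T A^{-1} B),
# i.e. the block determinant factors through the Schur complement S.
_a_demo = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]], dtype=float)
_b_demo = np.array([[0, 3], [3, 0], [2, 3]], dtype=float)
_c_demo = np.array([[2, 1], [6, 3]], dtype=float)
_s_demo = _c_demo - _b_demo.T @ np.linalg.inv(_a_demo) @ _b_demo
_m_demo = np.block([[_a_demo, _b_demo], [_b_demo.T, _c_demo]])
assert np.isclose(np.linalg.det(_m_demo), np.linalg.det(_a_demo) * np.linalg.det(_s_demo))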
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 1 | 0 |
from __future__ import annotations
def _lowerCAmelCase( __A ): # This function is recursive
UpperCAmelCase = len(__snake_case )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
UpperCAmelCase = array[0]
UpperCAmelCase = False
UpperCAmelCase = 1
UpperCAmelCase = []
while not is_found and i < array_length:
if array[i] < pivot:
UpperCAmelCase = True
UpperCAmelCase = [element for element in array[i:] if element >= array[i]]
UpperCAmelCase = longest_subsequence(__snake_case )
if len(__snake_case ) > len(__snake_case ):
UpperCAmelCase = temp_array
else:
i += 1
UpperCAmelCase = [element for element in array[1:] if element >= pivot]
UpperCAmelCase = [pivot, *longest_subsequence(__snake_case )]
if len(__snake_case ) > len(__snake_case ):
return temp_array
else:
return longest_subseq
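# Illustrative aside (not part of the original file): a readable restatement
# of the same pivot-and-filter recursion, since the obfuscated body above
# refers to undefined names. All names here are hypothetical.
def _longest_subsequence_demo(array: list) -> list:
    if len(array) <= 1:
        return array
    pivot, best = array[0], []
    for i in range(1, len(array)):
        if array[i] < pivot:  # first element smaller than the pivot: try restarting there
            best = _longest_subsequence_demo([e for e in array[i:] if e >= array[i]])
            break
    with_pivot = [pivot, *_longest_subsequence_demo([e for e in array[1:] if e >= pivot])]
    return with_pivot if len(with_pivot) > len(best) else best

assert _longest_subsequence_demo([3, 10, 2, 11]) == [3, 10, 11]
assert _longest_subsequence_demo([1, 1, 1]) == [1, 1, 1]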
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def _lowerCAmelCase( __A ):
UpperCAmelCase = fname.split(os.path.sep )[-1]
return re.search(r"^(.*)_\d+\.jpg$" , __A ).groups()[0]
class __magic_name__ ( _snake_case ):
def __init__( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : int=None ) -> Optional[Any]:
UpperCAmelCase = file_names
UpperCAmelCase = image_transform
UpperCAmelCase = label_to_id
def __len__( self : Tuple ) -> List[str]:
return len(self.file_names )
def __getitem__( self : Optional[int] , lowerCAmelCase__ : Tuple ) -> Dict:
UpperCAmelCase = self.file_names[idx]
UpperCAmelCase = PIL.Image.open(lowerCAmelCase__ )
UpperCAmelCase = raw_image.convert("RGB" )
if self.image_transform is not None:
UpperCAmelCase = self.image_transform(lowerCAmelCase__ )
UpperCAmelCase = extract_label(lowerCAmelCase__ )
if self.label_to_id is not None:
UpperCAmelCase = self.label_to_id[label]
return {"image": image, "label": label}
def _lowerCAmelCase( __A , __A ):
# Initialize accelerator
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config["lr"]
UpperCAmelCase = int(config["num_epochs"] )
UpperCAmelCase = int(config["seed"] )
UpperCAmelCase = int(config["batch_size"] )
UpperCAmelCase = config["image_size"]
if not isinstance(__A , (list, tuple) ):
UpperCAmelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , "isdigit" ):
if args.checkpointing_steps == "epoch":
UpperCAmelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
UpperCAmelCase = int(args.checkpointing_steps )
else:
raise ValueError(
F"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." )
else:
UpperCAmelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
UpperCAmelCase = os.path.split(__A )[-1].split("." )[0]
accelerator.init_trackers(__A , __A )
# Grab all the image filenames
UpperCAmelCase = [os.path.join(args.data_dir , __A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
UpperCAmelCase = [extract_label(__A ) for fname in file_names]
UpperCAmelCase = list(set(__A ) )
id_to_label.sort()
UpperCAmelCase = {lbl: i for i, lbl in enumerate(__A )}
# Set the seed before splitting the data.
np.random.seed(__A )
torch.manual_seed(__A )
torch.cuda.manual_seed_all(__A )
# Split our filenames between train and validation
UpperCAmelCase = np.random.permutation(len(__A ) )
UpperCAmelCase = int(0.8 * len(__A ) )
UpperCAmelCase = random_perm[:cut]
UpperCAmelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
UpperCAmelCase = Compose([RandomResizedCrop(__A , scale=(0.5, 1.0) ), ToTensor()] )
UpperCAmelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=__A , label_to_id=__A )
# For evaluation, we use a deterministic Resize
UpperCAmelCase = Compose([Resize(__A ), ToTensor()] )
UpperCAmelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=__A , label_to_id=__A )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = create_model("resnet50d" , pretrained=__A , num_classes=len(__A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
UpperCAmelCase = False
for param in model.get_classifier().parameters():
UpperCAmelCase = True
# We normalize the batches of images to be a bit faster.
UpperCAmelCase = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
UpperCAmelCase = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
UpperCAmelCase = OneCycleLR(optimizer=__A , max_lr=__A , epochs=__A , steps_per_epoch=len(__A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
__A , __A , __A , __A , __A )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase = 0
# We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"Resumed from checkpoint: {args.resume_from_checkpoint}" )
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
UpperCAmelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
UpperCAmelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
UpperCAmelCase = os.path.splitext(__A )[0]
if "epoch" in training_difference:
UpperCAmelCase = int(training_difference.replace("epoch_" , "" ) ) + 1
UpperCAmelCase = None
else:
UpperCAmelCase = int(training_difference.replace("step_" , "" ) )
UpperCAmelCase = resume_step // len(__A )
resume_step -= starting_epoch * len(__A )
# Now we train the model
for epoch in range(__A , __A ):
model.train()
if args.with_tracking:
UpperCAmelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
UpperCAmelCase = accelerator.skip_first_batches(__A , __A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
UpperCAmelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch["image"] - mean) / std
UpperCAmelCase = model(__A )
UpperCAmelCase = torch.nn.functional.cross_entropy(__A , batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__A , __A ):
UpperCAmelCase = F"step_{overall_step}"
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
model.eval()
UpperCAmelCase = 0
UpperCAmelCase = 0
for step, batch in enumerate(__A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch["image"] - mean) / std
with torch.no_grad():
UpperCAmelCase = model(__A )
UpperCAmelCase = outputs.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["label"]) )
UpperCAmelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
UpperCAmelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}: {100 * eval_metric:.2f}" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(__A ),
"epoch": epoch,
} , step=__A , )
if checkpointing_steps == "epoch":
UpperCAmelCase = F"epoch_{epoch}"
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
if args.with_tracking:
accelerator.end_training()
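# A minimal, self-contained sketch (illustration only, not part of the original
# script) of the checkpoint-name parsing performed above: `epoch_{i}` folders
# resume at epoch i + 1, while `step_{i}` folders are split into an epoch and a
# remaining step count. `_parse_checkpoint_name` and `steps_per_epoch` are
# hypothetical names introduced here for the example.
def _parse_checkpoint_name(name: str, steps_per_epoch: int):
    root = os.path.splitext(name)[0]
    if "epoch" in root:
        return int(root.replace("epoch_", "")) + 1, None  # (starting_epoch, resume_step)
    step = int(root.replace("step_", ""))
    return step // steps_per_epoch, step % steps_per_epoch
# _parse_checkpoint_name("epoch_4", 100) -> (5, None)
# _parse_checkpoint_name("step_250", 100) -> (2, 50)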
def _lowerCAmelCase( ):
UpperCAmelCase = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir" , required=__A , help="The data folder on disk." )
parser.add_argument("--fp16" , action="store_true" , help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision" , type=__A , default=__A , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps" , type=__A , default=__A , help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." , )
parser.add_argument(
"--output_dir" , type=__A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=__A , default=__A , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=__A , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(__A , __A )
if __name__ == "__main__":
main()
| 1 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _lowerCAmelCase( __A , __A , __A , __A , __A=True , __A="pt" ):
UpperCAmelCase = {"""add_prefix_space""": True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(" " ) else {}
UpperCAmelCase = padding_side
return tokenizer(
[line] , max_length=lowerCamelCase_ , padding="max_length" if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , )
def _lowerCAmelCase( __A , __A , __A=None , ):
UpperCAmelCase = input_ids.ne(lowerCamelCase_ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
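# Minimal sketch (illustration only; `_demo_trim_batch` is a hypothetical helper,
# not part of the original file) of the padding-column removal done by
# `trim_batch` above, assuming pad_token_id == 0.
def _demo_trim_batch():
    ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
    keep_column_mask = ids.ne(0).any(dim=0)  # tensor([True, True, False, False])
    return ids[:, keep_column_mask]  # tensor([[5, 6], [7, 0]])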
class __magic_name__ ( lowercase_ ):
def __init__( self : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str]="train" , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Union[str, Any]="" , ) -> Optional[Any]:
super().__init__()
UpperCAmelCase = Path(lowerCamelCase_ ).joinpath(type_path + ".source" )
UpperCAmelCase = Path(lowerCamelCase_ ).joinpath(type_path + ".target" )
UpperCAmelCase = self.get_char_lens(self.src_file )
UpperCAmelCase = max_source_length
UpperCAmelCase = max_target_length
assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
UpperCAmelCase = tokenizer
UpperCAmelCase = prefix
if n_obs is not None:
UpperCAmelCase = self.src_lens[:n_obs]
UpperCAmelCase = src_lang
UpperCAmelCase = tgt_lang
def __len__( self : List[Any] ) -> Tuple:
return len(self.src_lens )
def __getitem__( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any] ) -> Tuple:
UpperCAmelCase = index + 1 # linecache starts at 1
UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file ) , lowerCamelCase_ ).rstrip("\n" )
UpperCAmelCase = linecache.getline(str(self.tgt_file ) , lowerCamelCase_ ).rstrip("\n" )
assert source_line, f"empty source line for index {index}"
assert tgt_line, f"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowerCamelCase_ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer
)
UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer
UpperCAmelCase = encode_line(lowerCamelCase_ , lowerCamelCase_ , self.max_source_length , "right" )
UpperCAmelCase = encode_line(lowerCamelCase_ , lowerCamelCase_ , self.max_target_length , "right" )
UpperCAmelCase = source_inputs["""input_ids"""].squeeze()
UpperCAmelCase = target_inputs["""input_ids"""].squeeze()
UpperCAmelCase = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _UpperCamelCase ( lowerCAmelCase__ : Dict ) -> Any:
return [len(lowerCamelCase_ ) for x in Path(lowerCamelCase_ ).open().readlines()]
def _UpperCamelCase ( self : Tuple , lowerCAmelCase__ : Any ) -> List[Any]:
UpperCAmelCase = torch.stack([x["input_ids"] for x in batch] )
UpperCAmelCase = torch.stack([x["attention_mask"] for x in batch] )
UpperCAmelCase = torch.stack([x["decoder_input_ids"] for x in batch] )
UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowerCamelCase_ )
else self.tokenizer.pad_token_id
)
UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowerCamelCase_ )
else self.tokenizer.pad_token_id
)
UpperCAmelCase = trim_batch(lowerCamelCase_ , lowerCamelCase_ )
UpperCAmelCase = trim_batch(lowerCamelCase_ , lowerCamelCase_ , attention_mask=lowerCamelCase_ )
UpperCAmelCase = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
lowerCAmelCase__ = getLogger(__name__)
def _lowerCAmelCase( __A ):
return list(itertools.chain.from_iterable(lowerCamelCase_ ) )
def _lowerCAmelCase( __A ):
UpperCAmelCase = get_git_info()
save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , "git_log.json" ) )
def _lowerCAmelCase( __A , __A , __A=4 , **__A ):
with open(lowerCamelCase_ , "w" ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ )
def _lowerCAmelCase( __A ):
with open(lowerCamelCase_ ) as f:
return json.load(lowerCamelCase_ )
def _lowerCAmelCase( ):
UpperCAmelCase = git.Repo(search_parent_directories=lowerCamelCase_ )
UpperCAmelCase = {
"""repo_id""": str(lowerCamelCase_ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def _lowerCAmelCase( __A , __A ):
return list(map(lowerCamelCase_ , lowerCamelCase_ ) )
def _lowerCAmelCase( __A , __A ):
with open(lowerCamelCase_ , "wb" ) as f:
return pickle.dump(lowerCamelCase_ , lowerCamelCase_ )
def _lowerCAmelCase( __A ):
def remove_articles(__A ):
return re.sub(r"\b(a|an|the)\b" , " " , lowerCamelCase_ )
def white_space_fix(__A ):
return " ".join(text.split() )
def remove_punc(__A ):
UpperCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__A ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) )
def _lowerCAmelCase( __A , __A ):
UpperCAmelCase = normalize_answer(lowerCamelCase_ ).split()
UpperCAmelCase = normalize_answer(lowerCamelCase_ ).split()
UpperCAmelCase = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ )
UpperCAmelCase = sum(common.values() )
if num_same == 0:
return 0
UpperCAmelCase = 1.0 * num_same / len(lowerCamelCase_ )
UpperCAmelCase = 1.0 * num_same / len(lowerCamelCase_ )
UpperCAmelCase = (2 * precision * recall) / (precision + recall)
return fa
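# Worked example (illustration only) of the token-level F1 above: for the
# prediction "The cat sat." and the gold answer "A cat sat down",
# normalization yields ["cat", "sat"] and ["cat", "sat", "down"], so
# num_same = 2, precision = 2/2 = 1.0, recall = 2/3, and
# F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.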
def _lowerCAmelCase( __A , __A ):
return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ )
def _lowerCAmelCase( __A , __A ):
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
UpperCAmelCase = 0
for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ):
em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
em /= len(lowerCamelCase_ )
return {"em": em}
def _lowerCAmelCase( __A ):
return model_prefix.startswith("rag" )
def _lowerCAmelCase( __A , __A , __A ):
UpperCAmelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
UpperCAmelCase = """dropout_rate"""
for p in extra_params:
if getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and not hasattr(lowerCamelCase_ , equivalent_param[p] ):
logger.info("config doesn't have a `{}` attribute".format(lowerCamelCase_ ) )
delattr(lowerCamelCase_ , lowerCamelCase_ )
continue
UpperCAmelCase = p if hasattr(lowerCamelCase_ , lowerCamelCase_ ) else equivalent_param[p]
setattr(lowerCamelCase_ , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) )
delattr(lowerCamelCase_ , lowerCamelCase_ )
return hparams, config
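# Illustrative note (not part of the original file): `set_extra_model_params`
# lets a generic hyperparameter such as `dropout` be applied to configs that
# spell it differently; for T5-style configs the effect is roughly
# `config.dropout_rate = hparams.dropout`, after which the attribute is removed
# from `hparams` so it is not applied twice.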
| 712 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
lowerCAmelCase__ = ""
lowerCAmelCase__ = ""
lowerCAmelCase__ = ""
lowerCAmelCase__ = 1 # (0 is vertical, 1 is horizontal)
def _lowerCAmelCase( ):
UpperCAmelCase , UpperCAmelCase = get_dataset(__A , __A )
print("Processing..." )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = update_image_and_anno(__A , __A , __A )
for index, image in enumerate(__A ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
UpperCAmelCase = random_chars(32 )
UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0]
UpperCAmelCase = F"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
cva.imwrite(F"/{file_root}.jpg" , __A , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"Success {index+1}/{len(__A )} with {file_name}" )
UpperCAmelCase = []
for anno in new_annos[index]:
UpperCAmelCase = F"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
annos_list.append(__A )
with open(F"/{file_root}.txt" , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def _lowerCAmelCase( __A , __A ):
UpperCAmelCase = []
UpperCAmelCase = []
for label_file in glob.glob(os.path.join(__A , "*.txt" ) ):
UpperCAmelCase = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(__A ) as in_file:
UpperCAmelCase = in_file.readlines()
UpperCAmelCase = os.path.join(__A , F"{label_name}.jpg" )
UpperCAmelCase = []
for obj_list in obj_lists:
UpperCAmelCase = obj_list.rstrip("\n" ).split(" " )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__A )
labels.append(__A )
return img_paths, labels
def _lowerCAmelCase( __A , __A , __A = 1 ):
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = []
for idx in range(len(__A ) ):
UpperCAmelCase = []
UpperCAmelCase = img_list[idx]
path_list.append(__A )
UpperCAmelCase = anno_list[idx]
UpperCAmelCase = cva.imread(__A )
if flip_type == 1:
UpperCAmelCase = cva.flip(__A , __A )
for bbox in img_annos:
UpperCAmelCase = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
UpperCAmelCase = cva.flip(__A , __A )
for bbox in img_annos:
UpperCAmelCase = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__A )
new_imgs_list.append(__A )
return new_imgs_list, new_annos_lists, path_list
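# Worked example (illustration only) of the flips above, using YOLO-style
# normalized boxes [class, x_center, y_center, width, height]:
#   horizontal (flip_type == 1): [0, 0.30, 0.40, 0.10, 0.20] -> [0, 0.70, 0.40, 0.10, 0.20]
#   vertical   (flip_type == 0): [0, 0.30, 0.40, 0.10, 0.20] -> [0, 0.30, 0.60, 0.10, 0.20]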
def _lowerCAmelCase( __A = 32 ):
    assert number_char > 1, "The number of characters should be greater than 1"
UpperCAmelCase = ascii_lowercase + digits
return "".join(random.choice(__A ) for _ in range(__A ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 1 | 0 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __magic_name__ :
def __init__( self : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any]=1_3 , lowerCAmelCase__ : Dict=7 , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Optional[int]=9_9 , lowerCAmelCase__ : List[Any]=6_4 , lowerCAmelCase__ : Dict=3_2 , lowerCAmelCase__ : List[Any]=5 , lowerCAmelCase__ : Any=4 , lowerCAmelCase__ : List[str]=3_7 , lowerCAmelCase__ : Tuple="gelu" , lowerCAmelCase__ : Union[str, Any]=0.1 , lowerCAmelCase__ : Optional[int]=0.1 , lowerCAmelCase__ : Optional[Any]=5_1_2 , lowerCAmelCase__ : List[str]=1_6 , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : Optional[int]=0.02 , lowerCAmelCase__ : List[str]=3 , lowerCAmelCase__ : int=4 , lowerCAmelCase__ : List[Any]=None , ) -> Optional[int]:
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = embedding_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
def _UpperCamelCase ( self : Optional[int] ) -> Tuple:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : List[str] ) -> List[str]:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Any , lowerCAmelCase__ : str ) -> Any:
UpperCAmelCase = MegatronBertModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCAmelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
UpperCAmelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
UpperCAmelCase = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self : int , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str ) -> List[str]:
UpperCAmelCase = MegatronBertForMaskedLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCAmelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] ) -> Optional[int]:
UpperCAmelCase = MegatronBertForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCAmelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Tuple , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str ) -> Tuple:
UpperCAmelCase = MegatronBertForNextSentencePrediction(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCAmelCase = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] ) -> List[Any]:
UpperCAmelCase = MegatronBertForPreTraining(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCAmelCase = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , next_sentence_label=UpperCamelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _UpperCamelCase ( self : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int] ) -> Optional[int]:
UpperCAmelCase = MegatronBertForQuestionAnswering(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCAmelCase = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] ) -> Dict:
UpperCAmelCase = self.num_labels
UpperCAmelCase = MegatronBertForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCAmelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any] ) -> int:
UpperCAmelCase = self.num_labels
UpperCAmelCase = MegatronBertForTokenClassification(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCAmelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict ) -> List[str]:
UpperCAmelCase = self.num_choices
UpperCAmelCase = MegatronBertForMultipleChoice(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
UpperCAmelCase = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
UpperCAmelCase = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase = True
# test_resize_embeddings = False
UpperCAmelCase = False
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple=False ) -> Optional[int]:
UpperCAmelCase = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
if return_labels:
if model_class in get_values(UpperCamelCase_ ):
UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase_ )
UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ )
return inputs_dict
def _UpperCamelCase ( self : Dict ) -> List[str]:
UpperCAmelCase = MegatronBertModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=3_7 )
def _UpperCamelCase ( self : int ) -> Optional[int]:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*UpperCamelCase_ )
def _UpperCamelCase ( self : Union[str, Any] ) -> str:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*UpperCamelCase_ )
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*UpperCamelCase_ )
def _UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*UpperCamelCase_ )
def _UpperCamelCase ( self : Optional[Any] ) -> int:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*UpperCamelCase_ )
def _UpperCamelCase ( self : List[Any] ) -> List[Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*UpperCamelCase_ )
def _UpperCamelCase ( self : str ) -> Optional[Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*UpperCamelCase_ )
def _UpperCamelCase ( self : List[Any] ) -> Optional[int]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*UpperCamelCase_ )
def _lowerCAmelCase( __A ):
    return torch.tensor(
        __A , dtype=torch.long , device=torch_device , )
lowerCAmelCase__ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
@slow
@unittest.skip("Model is not available." )
def _UpperCamelCase ( self : Any ) -> Dict:
UpperCAmelCase = "nvidia/megatron-bert-uncased-345m"
if "MYDIR" in os.environ:
UpperCAmelCase = os.path.join(os.environ["MYDIR"] , UpperCamelCase_ )
UpperCAmelCase = MegatronBertModel.from_pretrained(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.half()
UpperCAmelCase = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
UpperCAmelCase = model(UpperCamelCase_ )[0]
UpperCAmelCase = torch.Size((1, 9, 1_0_2_4) )
self.assertEqual(output.shape , UpperCamelCase_ )
UpperCAmelCase = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728]
for ii in range(3 ):
for jj in range(3 ):
UpperCAmelCase = output[0, ii, jj]
UpperCAmelCase = expected[3 * ii + jj]
UpperCAmelCase = "ii={} jj={} a={} b={}".format(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
self.assertTrue(math.isclose(UpperCamelCase_ , UpperCamelCase_ , rel_tol=UpperCamelCase_ , abs_tol=UpperCamelCase_ ) , msg=UpperCamelCase_ )
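# An equivalent vectorized form of the element-wise loop above (sketch only,
# not part of the original test; assumes the module-level 1e-4 constant is the
# intended tolerance):
# torch.allclose(output[0, :3, :3].float().flatten(), torch.tensor(expected), rtol=1e-4, atol=1e-4)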
| 713 |
def _lowerCAmelCase( __A ):
if not isinstance(__A , __A ):
raise TypeError("only integers accepted as input" )
else:
UpperCAmelCase = str(abs(__A ) )
UpperCAmelCase = [list(__A ) for char in range(len(__A ) )]
for index in range(len(__A ) ):
num_transpositions[index].pop(__A )
return max(
int("".join(list(__A ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 1 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : int ) -> Any:
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = SamImageProcessor()
UpperCAmelCase = SamProcessor(lowerCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self : Any , **lowerCAmelCase__ : Optional[int] ) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ).image_processor
def _UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self : Dict ) -> List[Any]:
UpperCAmelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
UpperCAmelCase = [Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _UpperCamelCase ( self : str ) -> Any:
UpperCAmelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0 )
UpperCAmelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowerCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase__ )
def _UpperCamelCase ( self : List[Any] ) -> Dict:
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = SamProcessor(image_processor=lowerCAmelCase__ )
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = image_processor(lowerCAmelCase__ , return_tensors="np" )
UpperCAmelCase = processor(images=lowerCAmelCase__ , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def _UpperCamelCase ( self : Optional[int] ) -> List[str]:
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = SamProcessor(image_processor=lowerCAmelCase__ )
UpperCAmelCase = [torch.ones((1, 3, 5, 5) )]
UpperCAmelCase = [[1_7_6_4, 2_6_4_6]]
UpperCAmelCase = [[6_8_3, 1_0_2_4]]
UpperCAmelCase = processor.post_process_masks(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
UpperCAmelCase = processor.post_process_masks(
lowerCAmelCase__ , torch.tensor(lowerCAmelCase__ ) , torch.tensor(lowerCAmelCase__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
UpperCAmelCase = [np.ones((1, 3, 5, 5) )]
UpperCAmelCase = processor.post_process_masks(lowerCAmelCase__ , np.array(lowerCAmelCase__ ) , np.array(lowerCAmelCase__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
UpperCAmelCase = [[1, 0], [0, 1]]
with self.assertRaises(lowerCAmelCase__ ):
UpperCAmelCase = processor.post_process_masks(lowerCAmelCase__ , np.array(lowerCAmelCase__ ) , np.array(lowerCAmelCase__ ) )
@require_vision
@require_tf
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : int ) -> Optional[Any]:
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = SamImageProcessor()
UpperCAmelCase = SamProcessor(lowerCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self : Union[str, Any] , **lowerCAmelCase__ : List[Any] ) -> Optional[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ).image_processor
def _UpperCamelCase ( self : Any ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
UpperCAmelCase = [Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _UpperCamelCase ( self : List[Any] ) -> List[str]:
UpperCAmelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0 )
UpperCAmelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowerCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase__ )
def _UpperCamelCase ( self : Optional[int] ) -> Dict:
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = SamProcessor(image_processor=lowerCAmelCase__ )
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = image_processor(lowerCAmelCase__ , return_tensors="np" )
UpperCAmelCase = processor(images=lowerCAmelCase__ , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def _UpperCamelCase ( self : Any ) -> Any:
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = SamProcessor(image_processor=lowerCAmelCase__ )
UpperCAmelCase = [tf.ones((1, 3, 5, 5) )]
UpperCAmelCase = [[1_7_6_4, 2_6_4_6]]
UpperCAmelCase = [[6_8_3, 1_0_2_4]]
UpperCAmelCase = processor.post_process_masks(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
UpperCAmelCase = processor.post_process_masks(
lowerCAmelCase__ , tf.convert_to_tensor(lowerCAmelCase__ ) , tf.convert_to_tensor(lowerCAmelCase__ ) , return_tensors="tf" , )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
UpperCAmelCase = [np.ones((1, 3, 5, 5) )]
UpperCAmelCase = processor.post_process_masks(
lowerCAmelCase__ , np.array(lowerCAmelCase__ ) , np.array(lowerCAmelCase__ ) , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
UpperCAmelCase = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
UpperCAmelCase = processor.post_process_masks(
lowerCAmelCase__ , np.array(lowerCAmelCase__ ) , np.array(lowerCAmelCase__ ) , return_tensors="tf" )
@require_vision
@require_torchvision
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : Tuple ) -> Dict:
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = SamImageProcessor()
UpperCAmelCase = SamProcessor(lowerCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self : Any , **lowerCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ).image_processor
def _UpperCamelCase ( self : List[Any] ) -> Dict:
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self : Optional[Any] ) -> int:
UpperCAmelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
UpperCAmelCase = [Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = SamProcessor(image_processor=lowerCAmelCase__ )
UpperCAmelCase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
UpperCAmelCase = [tf.convert_to_tensor(lowerCAmelCase__ )]
UpperCAmelCase = [torch.tensor(lowerCAmelCase__ )]
UpperCAmelCase = [[1_7_6_4, 2_6_4_6]]
UpperCAmelCase = [[6_8_3, 1_0_2_4]]
UpperCAmelCase = processor.post_process_masks(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , return_tensors="tf" )
UpperCAmelCase = processor.post_process_masks(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , return_tensors="pt" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def _UpperCamelCase ( self : List[Any] ) -> Dict:
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = SamProcessor(image_processor=lowerCAmelCase__ )
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = image_processor(lowerCAmelCase__ , return_tensors="pt" )["pixel_values"].numpy()
UpperCAmelCase = processor(images=lowerCAmelCase__ , return_tensors="pt" )["pixel_values"].numpy()
UpperCAmelCase = image_processor(lowerCAmelCase__ , return_tensors="tf" )["pixel_values"].numpy()
UpperCAmelCase = processor(images=lowerCAmelCase__ , return_tensors="tf" )["pixel_values"].numpy()
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ ) )
| 714 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = 50 # max width of layer names
lowerCAmelCase__ = 70 # max width of quantizer names
def _lowerCAmelCase( __A ):
UpperCAmelCase = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=__A , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=__A , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=__A , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=__A , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=__A , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=__A , type=__A , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=__A , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def _lowerCAmelCase( __A ):
if args.calibrator == "max":
UpperCAmelCase = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
UpperCAmelCase = "histogram"
elif args.calibrator == "mse":
UpperCAmelCase = "histogram"
else:
raise ValueError(F"Invalid calibrator {args.calibrator}" )
UpperCAmelCase = QuantDescriptor(num_bits=args.aprec , calib_method=__A )
UpperCAmelCase = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(__A )
quant_nn.QuantLinear.set_default_quant_desc_weight(__A )
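# Summary (sketch, derived from the two functions above) of the CLI mapping:
#   --calibrator max        -> "max" calibration
#   --calibrator percentile -> "histogram" (a --percentile value is required)
#   --calibrator mse        -> "histogram"
# --quant-per-tensor switches weight scaling from per-channel (axis=(0,)) to
# per-tensor (axis=None).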
def _lowerCAmelCase( __A , __A , __A=False , __A=False ):
logger.info("Configuring Model for Quantization" )
logger.info(F"using quantization package {pytorch_quantization.__file__}" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(__A , ["embeddings"] , which="weight" , _disabled=__A )
if args.quant_disable:
set_quantizer_by_name(__A , [""] , _disabled=__A )
if args.quant_disable_keyword:
set_quantizer_by_name(__A , args.quant_disable_keyword , _disabled=__A )
if args.quant_disable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=__A )
if args.quant_enable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=__A )
if args.recalibrate_weights:
recalibrate_weights(__A )
if args.fuse_qkv:
fuse_qkv(__A , __A )
if args.clip_gelu:
clip_gelu(__A , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(__A )
def _lowerCAmelCase( __A ):
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"{name:80}: {module}" )
def _lowerCAmelCase( __A , __A ):
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(__A )
def _lowerCAmelCase( __A , __A ):
def fusea(__A , __A , __A ):
for mod in [qq, qk, qv]:
if not hasattr(__A , "_amax" ):
print(" WARNING: NO AMAX BUFFER" )
return
UpperCAmelCase = qq._amax.detach().item()
UpperCAmelCase = qk._amax.detach().item()
UpperCAmelCase = qv._amax.detach().item()
UpperCAmelCase = max(__A , __A , __A )
qq._amax.fill_(__A )
qk._amax.fill_(__A )
qv._amax.fill_(__A )
logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(F"FUSE_QKV: {name:{name_width}}" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
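# Worked example (illustration only) of the amax fusion above: if the q/k/v
# input quantizers hold amax values 2.0, 5.0 and 3.0, all three buffers are
# overwritten with max(2.0, 5.0, 3.0) = 5.0 so the fused QKV matmul can share a
# single scale factor.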
def _lowerCAmelCase( __A , __A ):
for name, mod in model.named_modules():
if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=__A )
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
UpperCAmelCase = mod.weight.shape[0]
UpperCAmelCase = mod._weight_quantizer._amax.detach()
UpperCAmelCase = torch.ones(__A , dtype=amax.dtype , device=amax.device ) * amax
print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ):
if not hasattr(mod.weight_quantizer , "_amax" ):
print("RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
UpperCAmelCase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
UpperCAmelCase = set(range(len(mod.weight.size() ) ) ) - axis_set
UpperCAmelCase = pytorch_quantization.utils.reduce_amax(mod.weight , axis=__A , keepdims=__A ).detach()
logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
UpperCAmelCase = amax
def _lowerCAmelCase( __A , __A=25 , __A=180 , __A=None ):
if ignore is None:
UpperCAmelCase = []
elif not isinstance(__A , __A ):
UpperCAmelCase = [ignore]
UpperCAmelCase = 0
for name, mod in model.named_modules():
if not hasattr(__A , "weight" ):
continue
UpperCAmelCase = max(__A , len(__A ) )
for name, mod in model.named_modules():
UpperCAmelCase = getattr(__A , "_input_quantizer" , __A )
UpperCAmelCase = getattr(__A , "_weight_quantizer" , __A )
if not hasattr(__A , "weight" ):
continue
if type(__A ) in ignore:
continue
if [True for s in ignore if type(__A ) is str and s in name]:
continue
UpperCAmelCase = F"Act:{input_q.extra_repr()}"
UpperCAmelCase = F"Wgt:{weight_q.extra_repr()}"
UpperCAmelCase = F"{name:{name_width}} {act_str} {wgt_str}"
if len(__A ) <= line_width:
logger.info(__A )
else:
logger.info(F"{name:{name_width}} {act_str}" )
logger.info(F"{' ':{name_width}} {wgt_str}" )
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
for name, mod in model.named_modules():
if isinstance(__A , pytorch_quantization.nn.TensorQuantizer ):
print(F"{name:80} {mod}" )
count += 1
print(F"{count} TensorQuantizers found in model" )
def _lowerCAmelCase( __A , __A , __A , __A , __A ):
UpperCAmelCase = getattr(__A , __A , __A )
if quantizer_mod is not None:
assert hasattr(__A , __A )
setattr(__A , __A , __A )
else:
logger.warning(F"{name} has no {quantizer}" )
def _lowerCAmelCase( __A , __A , __A="both" , **__A ):
UpperCAmelCase = F"Warning: changing {which} quantizers of {name:{qname_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
if which in ["input", "both"]:
set_quantizer(__A , __A , "_input_quantizer" , __A , __A )
if which in ["weight", "both"]:
set_quantizer(__A , __A , "_weight_quantizer" , __A , __A )
logger.info(__A )
def _lowerCAmelCase( __A , __A , **__A ):
for name, mod in model.named_modules():
if hasattr(__A , "_input_quantizer" ) or hasattr(__A , "_weight_quantizer" ):
for n in names:
if re.search(__A , __A ):
set_quantizers(__A , __A , **__A )
elif name.endswith("_quantizer" ):
for n in names:
if re.search(__A , __A ):
UpperCAmelCase = F"Warning: changing {name:{name_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
setattr(__A , __A , __A )
logger.info(__A )
| 1 | 0 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __magic_name__ ( _snake_case , unittest.TestCase ):
UpperCAmelCase = VideoToVideoSDPipeline
UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
UpperCAmelCase = PipelineTesterMixin.required_optional_params - {"""latents"""}
UpperCAmelCase = False
# No `output_type`.
UpperCAmelCase = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def _UpperCamelCase ( self : List[Any] ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=3_2 , attention_head_dim=4 , )
UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , )
UpperCAmelCase = CLIPTextModel(lowercase_ )
UpperCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _UpperCamelCase ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Union[str, Any]=0 ) -> Optional[int]:
UpperCAmelCase = floats_tensor((1, 3, 3, 3_2, 3_2) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
if str(lowercase_ ).startswith("mps" ):
UpperCAmelCase = torch.manual_seed(lowercase_ )
else:
UpperCAmelCase = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
UpperCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def _UpperCamelCase ( self : Dict ) -> Optional[int]:
UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = VideoToVideoSDPipeline(**lowercase_ )
UpperCAmelCase = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
UpperCAmelCase = "np"
UpperCAmelCase = sd_pipe(**lowercase_ ).frames
UpperCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (3_2, 3_2, 3)
UpperCAmelCase = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _UpperCamelCase ( self : Tuple ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase_ , expected_max_diff=5e-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _UpperCamelCase ( self : List[str] ) -> Dict:
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def _UpperCamelCase ( self : Any ) -> int:
pass
def _UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
return super().test_progress_bar()
@slow
@skip_mps
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : Any ) -> Union[str, Any]:
UpperCAmelCase = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
UpperCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6) , generator=lowercase_ )
UpperCAmelCase = video.to("cuda" )
UpperCAmelCase = "Spiderman is surfing"
UpperCAmelCase = pipe(lowercase_ , video=lowercase_ , generator=lowercase_ , num_inference_steps=3 , output_type="pt" ).frames
UpperCAmelCase = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 715 |
def _lowerCAmelCase( __A ):
assert column_title.isupper()
UpperCAmelCase = 0
UpperCAmelCase = len(__A ) - 1
UpperCAmelCase = 0
while index >= 0:
UpperCAmelCase = (ord(column_title[index] ) - 64) * pow(26 , __A )
answer += value
power += 1
index -= 1
return answer
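# Worked example (illustration only): "AB" -> (ord("B") - 64) * 26**0 +
# (ord("A") - 64) * 26**1 = 2 + 26 = 28, matching spreadsheet column AB.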
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class __magic_name__ :
def __init__( self : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Any]=1_3 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : Optional[int]=2_4 , lowerCAmelCase__ : List[Any]=1_6 , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : int=True , lowerCAmelCase__ : int=3_2 , lowerCAmelCase__ : Optional[Any]=5 , lowerCAmelCase__ : Union[str, Any]=4 , lowerCAmelCase__ : int=3_7 , lowerCAmelCase__ : Optional[Any]="gelu" , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : List[str]=0.1 , lowerCAmelCase__ : List[str]=1_0 , lowerCAmelCase__ : str=0.02 , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Any=2 , lowerCAmelCase__ : Optional[Any]=2 , ) -> List[str]:
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = patch_size
UpperCAmelCase = max_length
UpperCAmelCase = num_mel_bins
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = scope
UpperCAmelCase = frequency_stride
UpperCAmelCase = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
UpperCAmelCase = (self.max_length - self.patch_size) // self.time_stride + 1
UpperCAmelCase = frequency_out_dimension * time_out_dimension
UpperCAmelCase = num_patches + 2
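        # Worked example with the defaults above (patch_size=2, max_length=24,
        # num_mel_bins=16, frequency/time strides=2):
        # frequency_out = (16 - 2) // 2 + 1 = 8, time_out = (24 - 2) // 2 + 1 = 12,
        # so num_patches = 8 * 12 = 96 and the sequence length is 96 + 2 = 98.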
def _UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = self.get_config()
return config, input_values, labels
def _UpperCamelCase ( self : Any ) -> Tuple:
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] ) -> str:
UpperCAmelCase = ASTModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : int ) -> Optional[int]:
UpperCAmelCase = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {"input_values": input_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
UpperCAmelCase = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def setUp( self ):
        self.model_tester = ASTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ASTConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
    def test_inputs_embeds( self ):
pass
    def test_model_common_attributes( self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_audio( ):
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
    audio , sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class __magic_name__ ( unittest.TestCase ):
    @cached_property
    def default_feature_extractor( self ):
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
    @slow
    def test_inference_audio_classification( self ):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(torch_device )
        audio , sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 5_2_7) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.8_760, -7.0_042, -8.6_602] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 716 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase__ = get_tests_dir("fixtures")
class __magic_name__ ( unittest.TestCase ):
    def test_cached_files_are_used_when_internet_is_down( self ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=response_mock ) as mock_head:
            _ = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url( self ):
        # This test is for deprecated behavior and can be removed in v5
        _ = WavaVecaFeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class __magic_name__ ( unittest.TestCase ):
@classmethod
    def setUpClass( cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass( cls ):
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
    def test_push_to_hub( self ):
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-feature-extractor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id="test-feature-extractor" , push_to_hub=True , use_auth_token=self._token )
            new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
    def test_push_to_hub_in_organization( self ):
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=True , use_auth_token=self._token )
            new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
    def test_push_to_hub_dynamic_feature_extractor( self ):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=True )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 1 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader( path ):
    with open(path , "rb" ) as f:
        im = Image.open(f )
        return im.convert("RGB" )
@dataclass
class DataTrainingArguments :
    dataset_name: Optional[str] = field(
        default=None , metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        } , )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_dir: Optional[str] = field(default=None , metadata={"help": "A folder containing the training data."} )
    validation_dir: Optional[str] = field(default=None , metadata={"help": "A folder containing the validation data."} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    def __post_init__( self ):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
    model_type: Optional[str] = field(
        default=None , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES )} , )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    image_processor_name: str = field(default=None , metadata={"help": "Name or path of preprocessor config."} )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def collate_fn( examples ):
    pixel_values = torch.stack([example["pixel_values"] for example in examples] )
    labels = torch.tensor([example["labels"] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main( ):
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , __A , __A )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir , "**" )
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir , "**" )
        dataset = load_dataset(
            "imagefolder" , data_files=data_files , cache_dir=model_args.cache_dir , task="image-classification" , )
# If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split )
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    labelaid , idalabel = {}, {}
    for i, label in enumerate(labels ):
        labelaid[label] = str(i )
        idalabel[str(i )] = label
# Load the accuracy metric from the datasets package
UpperCAmelCase = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , labelaid=labelaid , idalabel=idalabel , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
UpperCAmelCase = image_processor.size['''shortest_edge''']
else:
UpperCAmelCase = (image_processor.size['''height'''], image_processor.size['''width'''])
UpperCAmelCase = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
UpperCAmelCase = Compose(
[
RandomResizedCrop(__A ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
UpperCAmelCase = Compose(
[
Resize(__A ),
CenterCrop(__A ),
ToTensor(),
normalize,
] )
    def train_transforms(example_batch ):
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
        ]
        return example_batch
    def val_transforms(example_batch ):
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
        return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms )
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 717 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowerCAmelCase__ = "src/diffusers"
# Matches is_xxx_available()
lowerCAmelCase__ = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
lowerCAmelCase__ = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
lowerCAmelCase__ = "\n{0} = None\n"
lowerCAmelCase__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
lowerCAmelCase__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def find_backend( line ):
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init( ):
    with open(os.path.join(PATH_TO_DIFFUSERS , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith("else:" ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object( name , backend_name ):
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
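# e.g. create_dummy_object("CONSTANT", '["torch"]') uses DUMMY_CONSTANT,
# create_dummy_object("shift_tokens_right", '["torch"]') uses DUMMY_FUNCTION, and
# create_dummy_object("UNet2DModel", '["torch"]') uses DUMMY_CLASS.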
def create_dummy_files( backend_specific_objects=None ):
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(F"\"{b}\"" for b in backend.split("_and_" ) ) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies( overwrite=False ):
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , "utils" )
    dummy_file_paths = {
        backend: os.path.join(path , F"dummy_{short_names.get(backend , backend )}_objects.py" )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , "r" , encoding="utf-8" , newline="\n" ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F"Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main "
                    "__init__ has new objects." )
                with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    F"diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` "
                    "to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 1 | 0 |
import math
import qiskit
def quantum_full_adder( input_a = 1 , input_b = 1 , carry_in = 1 ):
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError("inputs must be integers." )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive." )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError("inputs must be exact integers." )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2." )
    # build registers
    qr = qiskit.QuantumRegister(4 , "qr" )
    cr = qiskit.ClassicalRegister(2 , "cr" )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator" )
    job = qiskit.execute(quantum_circuit , backend , shots=1000 )
    return job.result().get_counts(quantum_circuit )
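# Assuming ideal (noiseless) simulation, quantum_full_adder(1, 1, 1) should return
# {"11": 1000}: 1 + 1 + 1 = 0b11, with the sum bit on qubit 2 and the carry bit on qubit 3.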
if __name__ == "__main__":
print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
| 718 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class __magic_name__ ( BackboneConfigMixin , PretrainedConfig ):
    model_type = "convnextv2"
    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-1_2 , drop_path_rate=0.0 , image_size=2_2_4 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
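# Usage sketch: cfg = __magic_name__() builds the defaults above, e.g.
# cfg.hidden_sizes == [96, 192, 384, 768] and
# cfg.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"].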
| 1 | 0 |
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm( number ):
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
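# e.g. number = 0b1010: 0b1010 & 0b1001 = 0b1000, then 0b1000 & 0b0111 = 0,
# so two iterations count 2 set bits.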
def get_set_bits_count_using_modulo_operator( number ):
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark( ):
    def do_benchmark(number ) -> None:
        setup = "import __main__ as z"
        print(F"Benchmark when {number = }:" )
        print(F"{get_set_bits_count_using_modulo_operator(number ) = }" )
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=setup )
        print(F"timeit() runs in {timing} seconds" )
        print(F"{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }" )
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=setup , )
        print(F"timeit() runs in {timing} seconds" )
    for number in (25, 37, 58, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 719 |
lowerCAmelCase__ = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCAmelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCAmelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 1 | 0 |
def search( list_data , key , left = 0 , right = 0 ):
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
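# Example: search([1, 3, 5, 7], 5) returns 2; search([1, 3, 5, 7], 4) returns -1.
# Note this is a two-ended linear search, so it also works on unsorted lists.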
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __magic_name__ ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        return 3_2
    @property
    def time_input_dim( self ):
        return 3_2
    @property
    def block_out_channels_a( self ):
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        return 1_0_0
    @property
    def dummy_tokenizer( self ):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
        return tokenizer
    @property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet( self ):
        torch.manual_seed(0 )
        model_kwargs = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNetaDConditionModel(**model_kwargs )
        return model
    @property
    def dummy_movq_kwargs( self ):
        return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq( self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="epsilon" , thresholding=False , )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uinta(image ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
        # create mask
        mask = np.ones((6_4, 6_4) , dtype=np.floataa )
        mask[:3_2, :3_2] = 0
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 6_4,
            "width": 6_4,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint( self ):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
            [0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self ):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        mask = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
        mask[:2_5_0, 2_5_0:-2_5_0] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
        pipe_prior.to(torch_device )
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(image , expected_image )
| 1 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester :
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=1_0 , num_channels=3 , min_size=3_2 * 8 , max_size=3_2 * 8 , num_labels=4 , hidden_dim=6_4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config( self ):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim , )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 6_4
        config.dim_feedforward = 1_2_8
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common( self ):
        config , pixel_values , pixel_mask , _ , _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state( self , output , config ):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(transformer_decoder_hidden_states ) , config.decoder_layers )
    def create_and_check_maskaformer_model( self , config , pixel_values , pixel_mask , output_hidden_states=False ):
        with torch.no_grad():
            model = MaskaFormerModel(config=config )
            model.to(torch_device )
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            output = model(pixel_values , output_hidden_states=True )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(output , config )
    def create_and_check_maskaformer_instance_segmentation_head_model( self , config , pixel_values , pixel_mask , mask_labels , class_labels ):
        model = MaskaFormerForUniversalSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        def comm_check_on_output(result ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            result = model(pixel_values )
        comm_check_on_output(result )
        result = model(
            pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )
        comm_check_on_output(result )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __magic_name__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp( self ):
        self.model_tester = MaskaFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskaFormerConfig , has_text_modality=False )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_maskaformer_model( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config , **inputs_dict , output_hidden_states=False )
    def test_maskaformer_instance_segmentation_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def _UpperCamelCase ( self : List[Any] ) -> Dict:
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def _UpperCamelCase ( self : List[str] ) -> List[Any]:
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def _UpperCamelCase ( self : Tuple ) -> List[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`" )
def _UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _UpperCamelCase ( self : int ) -> Tuple:
pass
    def test_forward_signature( self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def _UpperCamelCase ( self : Tuple ) -> Optional[int]:
UpperCAmelCase = (self.model_tester.min_size,) * 2
UpperCAmelCase = {
'''pixel_values''': torch.randn((2, 3, *size) , device=UpperCAmelCase__ ),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=UpperCAmelCase__ ),
'''class_labels''': torch.zeros(2 , 1_0 , device=UpperCAmelCase__ ).long(),
}
UpperCAmelCase = self.model_tester.get_config()
UpperCAmelCase = MaskaFormerForUniversalSegmentation(UpperCAmelCase__ ).to(UpperCAmelCase__ )
UpperCAmelCase = model(**UpperCAmelCase__ )
self.assertTrue(outputs.loss is not None )
def _UpperCamelCase ( self : List[str] ) -> Optional[int]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(UpperCAmelCase__ , **UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ )
def _UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(UpperCAmelCase__ ).to(UpperCAmelCase__ )
UpperCAmelCase = model(**UpperCAmelCase__ , output_attentions=UpperCAmelCase__ )
self.assertTrue(outputs.attentions is not None )
def _UpperCamelCase ( self : int ) -> str:
if not self.model_tester.is_training:
return
UpperCAmelCase = self.all_model_classes[1]
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.train()
UpperCAmelCase = model(UpperCAmelCase__ , mask_labels=UpperCAmelCase__ , class_labels=UpperCAmelCase__ ).loss
loss.backward()
def _UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase = self.all_model_classes[1]
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = model_class(UpperCAmelCase__ ).to(UpperCAmelCase__ )
model.train()
UpperCAmelCase = model(UpperCAmelCase__ , mask_labels=UpperCAmelCase__ , class_labels=UpperCAmelCase__ )
UpperCAmelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCAmelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCAmelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCAmelCase__ = 1e-4
def prepare_img( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_vision
@slow
class __magic_name__ ( unittest.TestCase ):
@cached_property
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _UpperCamelCase ( self : Union[str, Any] ) -> Any:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _UpperCamelCase ( self : List[Any] ) -> List[Any]:
UpperCAmelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(UpperCAmelCase__ )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(UpperCAmelCase__ , return_tensors="pt" ).to(UpperCAmelCase__ )
UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(UpperCAmelCase__ , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
UpperCAmelCase = model(**UpperCAmelCase__ )
UpperCAmelCase = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(UpperCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
UpperCAmelCase = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(UpperCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
UpperCAmelCase = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(UpperCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(UpperCAmelCase__ ).eval()
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(UpperCAmelCase__ , return_tensors="pt" ).to(UpperCAmelCase__ )
UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(UpperCAmelCase__ , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
UpperCAmelCase = model(**UpperCAmelCase__ )
# masks_queries_logits
UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCAmelCase = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
UpperCAmelCase = torch.tensor(UpperCAmelCase__ ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
# class_queries_logits
UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
UpperCAmelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(UpperCAmelCase__ ).eval()
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="pt" , )
UpperCAmelCase = inputs['''pixel_values'''].to(UpperCAmelCase__ )
UpperCAmelCase = [el.to(UpperCAmelCase__ ) for el in inputs['''mask_labels''']]
UpperCAmelCase = [el.to(UpperCAmelCase__ ) for el in inputs['''class_labels''']]
with torch.no_grad():
UpperCAmelCase = model(**UpperCAmelCase__ )
self.assertTrue(outputs.loss is not None )
| 721 |
def euclidean_distance_sqr( point_a , point_b ):
    return (point_a[0] - point_b[0]) ** 2 + (point_a[1] - point_b[1]) ** 2
def column_based_sort( array , column=0 ):
    return sorted(array , key=lambda x : x[column] )
def dis_between_closest_pair( points , points_counts , min_dis=float("inf" ) ):
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip( points , points_counts , min_dis=float("inf" ) ):
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr( points_sorted_on_x , points_sorted_on_y , points_counts ):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )
def closest_pair_of_points( points , points_counts ):
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
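# For the sample points below, the closest pair is (2, 3) and (3, 4),
# so the printed distance should be sqrt(2) ~ 1.4142.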
if __name__ == "__main__":
lowerCAmelCase__ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 1 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys( state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder" ):
            key = key.replace("module.encoder" , "glpn.encoder" )
        if key.startswith("module.decoder" ):
            key = key.replace("module.decoder" , "decoder.stages" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed" ) + len("patch_embed" )]
            key = key.replace(F"patch_embed{idx}" , F"patch_embeddings.{int(idx )-1}" )
        if "norm" in key:
            key = key.replace("norm" , "layer_norm" )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )]
            key = key.replace(F"layer_norm{idx}" , F"layer_norm.{int(idx )-1}" )
        if "layer_norm1" in key:
            key = key.replace("layer_norm1" , "layer_norm_1" )
        if "layer_norm2" in key:
            key = key.replace("layer_norm2" , "layer_norm_2" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block" ) + len("block" )]
            key = key.replace(F"block{idx}" , F"block.{int(idx )-1}" )
        if "attn.q" in key:
            key = key.replace("attn.q" , "attention.self.query" )
        if "attn.proj" in key:
            key = key.replace("attn.proj" , "attention.output.dense" )
        if "attn" in key:
            key = key.replace("attn" , "attention.self" )
        if "fc1" in key:
            key = key.replace("fc1" , "dense1" )
        if "fc2" in key:
            key = key.replace("fc2" , "dense2" )
        if "linear_pred" in key:
            key = key.replace("linear_pred" , "classifier" )
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv" , "linear_fuse" )
            key = key.replace("linear_fuse.bn" , "batch_norm" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c" ) + len("linear_c" )]
            key = key.replace(F"linear_c{idx}" , F"linear_c.{int(idx )-1}" )
        if "bot_conv" in key:
            key = key.replace("bot_conv" , "0.convolution" )
        if "skip_conv1" in key:
            key = key.replace("skip_conv1" , "1.convolution" )
        if "skip_conv2" in key:
            key = key.replace("skip_conv2" , "2.convolution" )
        if "fusion1" in key:
            key = key.replace("fusion1" , "1.fusion" )
        if "fusion2" in key:
            key = key.replace("fusion2" , "2.fusion" )
        if "fusion3" in key:
            key = key.replace("fusion3" , "3.fusion" )
        if "fusion" in key and "conv" in key:
            key = key.replace("conv" , "convolutional_layer" )
        if key.startswith("module.last_layer_depth" ):
            key = key.replace("module.last_layer_depth" , "head.head" )
        new_state_dict[key] = value
    return new_state_dict
def _lowerCAmelCase( __A , __A ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCAmelCase = state_dict.pop(F"glpn.encoder.block.{i}.{j}.attention.self.kv.weight" )
UpperCAmelCase = state_dict.pop(F"glpn.encoder.block.{i}.{j}.attention.self.kv.bias" )
# next, add keys and values (in that order) to the state dict
UpperCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
UpperCAmelCase = kv_bias[: config.hidden_sizes[i]]
UpperCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCAmelCase = kv_bias[config.hidden_sizes[i] :]
def _lowerCAmelCase( ):
UpperCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return image
@torch.no_grad()
def _lowerCAmelCase( __A , __A , __A=False , __A=None ):
UpperCAmelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
UpperCAmelCase = GLPNImageProcessor()
# prepare image
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=_lowerCAmelCase , return_tensors="pt" ).pixel_values
logger.info("Converting model..." )
# load original state dict
UpperCAmelCase = torch.load(_lowerCAmelCase , map_location=torch.device("cpu" ) )
# rename keys
UpperCAmelCase = rename_keys(_lowerCAmelCase )
# key and value matrices need special treatment
read_in_k_v(_lowerCAmelCase , _lowerCAmelCase )
# create HuggingFace model and load state dict
UpperCAmelCase = GLPNForDepthEstimation(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
# forward pass
UpperCAmelCase = model(_lowerCAmelCase )
UpperCAmelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
UpperCAmelCase = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
UpperCAmelCase = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(F"Unknown model name: {model_name}" )
UpperCAmelCase = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , _lowerCAmelCase , atol=1E-4 )
print("Looks ok!" )
# finally, push to hub if required
if push_to_hub:
logger.info("Pushing model and image processor to the hub..." )
model.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCAmelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCAmelCase , )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
lowerCAmelCase__ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
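# Hypothetical command line for the conversion script above (the script filename
# and all paths are placeholders, not taken from the original source):
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti --push_to_hub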
| 700 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __magic_name__ :
def __init__( self : Optional[int] ) -> Optional[Any]:
UpperCAmelCase = ""
UpperCAmelCase = ""
UpperCAmelCase = []
UpperCAmelCase = 0
UpperCAmelCase = 2_5_6
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : Optional[Any] ) -> List[str]:
UpperCAmelCase = cva.imread(lowerCAmelCase__ , 0 )
UpperCAmelCase = copy.deepcopy(self.img )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="x" )
UpperCAmelCase = np.sum(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
UpperCAmelCase = x[i] / self.k
self.sk += prk
UpperCAmelCase = (self.L - 1) * self.sk
if self.rem != 0:
UpperCAmelCase = int(last % last )
UpperCAmelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowerCAmelCase__ )
UpperCAmelCase = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def _UpperCamelCase ( self : str ) -> int:
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
lowerCAmelCase__ = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
lowerCAmelCase__ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
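# A rough numpy restatement of the mapping built in stretch() above, assuming an
# 8-bit grayscale integer array `img`; it mirrors the same cumulative-sum logic.
def equalize(img, levels=2_5_6):
    hist, _ = np.histogram(img.ravel(), bins=levels, range=(0, levels))
    cdf = hist.cumsum() / hist.sum()                         # running sum of p(r_k)
    mapping = np.rint((levels - 1) * cdf).astype(np.uint8)   # (L - 1) * s_k, rounded
    return mapping[img]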
| 1 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __magic_name__ :
UpperCAmelCase = 42
UpperCAmelCase = None
# Automatically constructed
UpperCAmelCase = """dict"""
UpperCAmelCase = None
UpperCAmelCase = field(default="""Translation""" , init=lowerCamelCase__ , repr=lowerCamelCase__ )
def __call__( self : str ) -> List[Any]:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def _UpperCamelCase ( self : List[str] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value("string" ) for k in sorted(self.languages )}
@dataclass
class __magic_name__ :
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
# Automatically constructed
UpperCAmelCase = """dict"""
UpperCAmelCase = None
UpperCAmelCase = field(default="""TranslationVariableLanguages""" , init=lowerCamelCase__ , repr=lowerCamelCase__ )
def _UpperCamelCase ( self : Any ) -> List[str]:
UpperCAmelCase = sorted(set(self.languages ) ) if self.languages else None
UpperCAmelCase = len(self.languages ) if self.languages else None
def __call__( self : List[str] ) -> Any:
return pa.struct({"language": pa.list_(pa.string() ), "translation": pa.list_(pa.string() )} )
def _UpperCamelCase ( self : List[str] , lowerCAmelCase__ : Optional[int] ) -> Dict:
UpperCAmelCase = set(self.languages )
if self.languages and set(__lowerCamelCase ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(__lowerCamelCase ) - lang_set ) )}) are not in valid set ({', '.join(__lowerCamelCase )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
UpperCAmelCase = []
for lang, text in translation_dict.items():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
UpperCAmelCase = zip(*sorted(__lowerCamelCase ) )
return {"language": languages, "translation": translations}
def _UpperCamelCase ( self : Optional[int] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value("string" ) ),
"translation": Sequence(Value("string" ) ),
}
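# Usage sketch for the feature types above (import paths assumed from the
# public `datasets` API):
#   from datasets import Dataset, Features
#   from datasets.features import Translation
#   features = Features({"translation": Translation(languages=["de", "en"])})
#   ds = Dataset.from_dict(
#       {"translation": [{"de": "Hallo", "en": "Hello"}]}, features=features
#   )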
| 701 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _snake_case , unittest.TestCase ):
UpperCAmelCase = LEDTokenizer
UpperCAmelCase = LEDTokenizerFast
UpperCAmelCase = True
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
super().setUp()
UpperCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCAmelCase = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
UpperCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCAmelCase = {"unk_token": "<unk>"}
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def _UpperCamelCase ( self : Union[str, Any] , **lowerCAmelCase__ : Optional[int] ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _UpperCamelCase ( self : str , **lowerCAmelCase__ : str ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _UpperCamelCase ( self : List[str] , lowerCAmelCase__ : List[Any] ) -> List[Any]:
return "lower newer", "lower newer"
@cached_property
def _UpperCamelCase ( self : Dict ) -> str:
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def _UpperCamelCase ( self : int ) -> Tuple:
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def _UpperCamelCase ( self : Tuple ) -> List[str]:
UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCAmelCase = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , max_length=len(lowerCAmelCase__ ) , padding=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@require_torch
def _UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="pt" )
self.assertIn("input_ids" , lowerCAmelCase__ )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertNotIn("labels" , lowerCAmelCase__ )
self.assertNotIn("decoder_attention_mask" , lowerCAmelCase__ )
@require_torch
def _UpperCamelCase ( self : int ) -> int:
UpperCAmelCase = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(text_target=lowerCAmelCase__ , max_length=3_2 , padding="max_length" , return_tensors="pt" )
self.assertEqual(3_2 , targets["input_ids"].shape[1] )
@require_torch
def _UpperCamelCase ( self : Any ) -> int:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(
["I am a small frog" * 1_0_2_4, "I am a small frog"] , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def _UpperCamelCase ( self : Dict ) -> Tuple:
UpperCAmelCase = ["A long paragraph for summarization."]
UpperCAmelCase = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase = tokenizer(text_target=lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase = inputs["input_ids"]
UpperCAmelCase = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def _UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = ["Summary of the text.", "Another summary."]
UpperCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
UpperCAmelCase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ )
UpperCAmelCase = [[0] * len(lowerCAmelCase__ ) for x in encoded_output["input_ids"]]
UpperCAmelCase = tokenizer.pad(lowerCAmelCase__ )
self.assertSequenceEqual(outputs["global_attention_mask"] , lowerCAmelCase__ )
def _UpperCamelCase ( self : List[str] ) -> int:
pass
def _UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = "A, <mask> AllenNLP sentence."
UpperCAmelCase = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
UpperCAmelCase = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 1 | 0 |
import argparse
import os
import re
import packaging.version
lowerCAmelCase__ = "examples/"
lowerCAmelCase__ = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
lowerCAmelCase__ = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
lowerCAmelCase__ = "README.md"
def _lowerCAmelCase( __A , __A , __A ):
with open(UpperCamelCase__ , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase , UpperCAmelCase = REPLACE_PATTERNS[pattern]
UpperCAmelCase = replace.replace("VERSION" , UpperCamelCase__ )
UpperCAmelCase = re_pattern.sub(UpperCamelCase__ , UpperCamelCase__ )
with open(UpperCamelCase__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(UpperCamelCase__ )
def _lowerCAmelCase( __A ):
for folder, directories, fnames in os.walk(UpperCamelCase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , pattern="examples" )
def _lowerCAmelCase( __A , __A=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not patch:
update_version_in_examples(UpperCamelCase__ )
def _lowerCAmelCase( ):
UpperCAmelCase = "🤗 Transformers currently provides the following architectures"
UpperCAmelCase = "1. Want to contribute a new model?"
with open(UpperCamelCase__ , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.readlines()
# Find the start of the list.
UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
UpperCAmelCase = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(UpperCamelCase__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(UpperCamelCase__ )
def _lowerCAmelCase( ):
with open(REPLACE_FILES["init"] , "r" ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase = REPLACE_PATTERNS["init"][0].search(UpperCamelCase__ ).groups()[0]
return packaging.version.parse(UpperCamelCase__ )
def _lowerCAmelCase( __A=False ):
UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can\'t create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
UpperCAmelCase = default_version.base_version
elif patch:
UpperCAmelCase = F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
UpperCAmelCase = F"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
UpperCAmelCase = input(F"Which version are you releasing? [{default_version}]" )
if len(UpperCamelCase__ ) == 0:
UpperCAmelCase = default_version
print(F"Updating version to {version}." )
global_version_update(UpperCamelCase__ , patch=UpperCamelCase__ )
if not patch:
print("Cleaning main README, don\'t forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def _lowerCAmelCase( ):
UpperCAmelCase = get_version()
UpperCAmelCase = F"{current_version.major}.{current_version.minor + 1}.0.dev0"
UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase = input(F"Which version are we developing now? [{dev_version}]" )
if len(UpperCamelCase__ ) == 0:
UpperCAmelCase = dev_version
print(F"Updating version to {version}." )
global_version_update(UpperCamelCase__ )
print("Cleaning main README, don\'t forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
lowerCAmelCase__ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
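# Worked example of the "init" pattern/replacement pair defined above:
#   pattern, repl = REPLACE_PATTERNS["init"]
#   text = '__version__ = "4.30.0.dev0"\n'
#   pattern.sub(repl.replace("VERSION", "4.30.1"), text)
#   # -> '__version__ = "4.30.1"\n'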
| 702 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase__ = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def _UpperCamelCase ( self : int ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : List[Any] ) -> Dict:
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=0.9 , lowerCAmelCase__ : Tuple=3 , lowerCAmelCase__ : Optional[int]=0.5 ) -> Any:
if NLTK_VERSION >= version.Version("3.6.5" ):
UpperCAmelCase = [
meteor_score.single_meteor_score(
word_tokenize(lowerCAmelCase__ ) , word_tokenize(lowerCAmelCase__ ) , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
else:
UpperCAmelCase = [
meteor_score.single_meteor_score(lowerCAmelCase__ , lowerCAmelCase__ , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
return {"meteor": np.mean(lowerCAmelCase__ )}
| 1 | 0 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class __magic_name__ :
def __init__( self : int , lowerCAmelCase__ : Any=2 , lowerCAmelCase__ : List[str]=3 , lowerCAmelCase__ : Union[str, Any]=6_4 , lowerCAmelCase__ : Tuple=None ) -> Optional[Any]:
UpperCAmelCase = np.random.default_rng(lowercase_ )
UpperCAmelCase = length
UpperCAmelCase = rng.normal(size=(length,) ).astype(np.floataa )
UpperCAmelCase = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : str ) -> Dict:
return self.length
def __getitem__( self : List[str] , lowerCAmelCase__ : Optional[int] ) -> Optional[int]:
return {"x": self.x[i], "y": self.y[i]}
class __magic_name__ ( torch.nn.Module ):
def __init__( self : Any , lowerCAmelCase__ : Any=0 , lowerCAmelCase__ : Any=0 , lowerCAmelCase__ : List[Any]=False ) -> Union[str, Any]:
super().__init__()
UpperCAmelCase = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCAmelCase = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCAmelCase = True
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : Union[str, Any]=None ) -> Union[str, Any]:
if self.first_batch:
print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
UpperCAmelCase = False
return x * self.a[0] + self.b[0]
class __magic_name__ ( torch.nn.Module ):
def __init__( self : Any , lowerCAmelCase__ : List[str]=0 , lowerCAmelCase__ : Optional[int]=0 , lowerCAmelCase__ : str=False ) -> Tuple:
super().__init__()
UpperCAmelCase = torch.nn.Parameter(torch.tensor(lowercase_ ).float() )
UpperCAmelCase = torch.nn.Parameter(torch.tensor(lowercase_ ).float() )
UpperCAmelCase = True
def _UpperCamelCase ( self : str , lowerCAmelCase__ : int=None ) -> List[str]:
if self.first_batch:
print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
UpperCAmelCase = False
return x * self.a + self.b
def _lowerCAmelCase( __A , __A = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
UpperCAmelCase = AutoTokenizer.from_pretrained("bert-base-cased" )
UpperCAmelCase = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
UpperCAmelCase = load_dataset("csv" , data_files=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = datasets["train"].unique("label" )
UpperCAmelCase = {v: i for i, v in enumerate(__SCREAMING_SNAKE_CASE )}
def tokenize_function(__A ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase = tokenizer(
examples["sentence1"] , examples["sentence2"] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="max_length" )
if "label" in examples:
UpperCAmelCase = [label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase = datasets.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=["sentence1", "sentence2", "label"] , )
def collate_fn(__A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__SCREAMING_SNAKE_CASE , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(__SCREAMING_SNAKE_CASE , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(tokenized_datasets["train"] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=2 )
UpperCAmelCase = DataLoader(tokenized_datasets["validation"] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=1 )
return train_dataloader, eval_dataloader
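# Quick sketch of the synthetic regression helpers above. The parameter names
# a/b/length/seed are inferred from the class body (this dump obfuscates them
# in the signature):
#   ds = RegressionDataset(a=2, b=3, length=96, seed=0)
#   dl = DataLoader(ds, batch_size=16)
#   batch = next(iter(dl))   # {"x": tensor of shape (16,), "y": tensor of shape (16,)}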
| 703 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """lxmert"""
UpperCAmelCase = {}
def __init__( self : int , lowerCAmelCase__ : Any=3_0_5_2_2 , lowerCAmelCase__ : List[str]=7_6_8 , lowerCAmelCase__ : Union[str, Any]=1_2 , lowerCAmelCase__ : List[Any]=9_5_0_0 , lowerCAmelCase__ : Any=1_6_0_0 , lowerCAmelCase__ : Union[str, Any]=4_0_0 , lowerCAmelCase__ : Tuple=3_0_7_2 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : int=5_1_2 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : List[str]=0.02 , lowerCAmelCase__ : str=1e-1_2 , lowerCAmelCase__ : str=9 , lowerCAmelCase__ : int=5 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : List[Any]=2_0_4_8 , lowerCAmelCase__ : Any=4 , lowerCAmelCase__ : Dict=6.67 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Tuple=True , **lowerCAmelCase__ : List[Any] , ) -> Dict:
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = num_qa_labels
UpperCAmelCase = num_object_labels
UpperCAmelCase = num_attr_labels
UpperCAmelCase = l_layers
UpperCAmelCase = x_layers
UpperCAmelCase = r_layers
UpperCAmelCase = visual_feat_dim
UpperCAmelCase = visual_pos_dim
UpperCAmelCase = visual_loss_normalizer
UpperCAmelCase = task_matched
UpperCAmelCase = task_mask_lm
UpperCAmelCase = task_obj_predict
UpperCAmelCase = task_qa
UpperCAmelCase = visual_obj_loss
UpperCAmelCase = visual_attr_loss
UpperCAmelCase = visual_feat_loss
UpperCAmelCase = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**lowerCAmelCase__ )
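# Instantiating the config with its defaults, then reading one field back:
#   from transformers import LxmertConfig
#   config = LxmertConfig()
#   config.num_qa_labels   # 9500, per the signature above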
| 1 | 0 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def _lowerCAmelCase( __A ):
return 1.0 / (1.0 + np.exp(-_outputs ))
def _lowerCAmelCase( __A ):
UpperCAmelCase = np.max(_outputs , axis=-1 , keepdims=__A )
UpperCAmelCase = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__A )
class __magic_name__ ( _UpperCAmelCase ):
UpperCAmelCase = '''sigmoid'''
UpperCAmelCase = '''softmax'''
UpperCAmelCase = '''none'''
@add_end_docstrings(
_UpperCAmelCase , r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class __magic_name__ ( _UpperCAmelCase ):
UpperCAmelCase = False
UpperCAmelCase = ClassificationFunction.NONE
def __init__( self : Union[str, Any] , **lowerCAmelCase__ : int ) -> Tuple:
super().__init__(**A_ )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def _UpperCamelCase ( self : str , lowerCAmelCase__ : int=None , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Any="" , **lowerCAmelCase__ : Union[str, Any] ) -> str:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
UpperCAmelCase = tokenizer_kwargs
UpperCAmelCase = {}
if hasattr(self.model.config , "return_all_scores" ) and return_all_scores is None:
UpperCAmelCase = self.model.config.return_all_scores
if isinstance(A_ , A_ ) or top_k is None:
UpperCAmelCase = top_k
UpperCAmelCase = False
elif return_all_scores is not None:
warnings.warn(
"`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." , A_ , )
if return_all_scores:
UpperCAmelCase = None
else:
UpperCAmelCase = 1
if isinstance(A_ , A_ ):
UpperCAmelCase = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
UpperCAmelCase = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self : Any , *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : Tuple ) -> Optional[int]:
UpperCAmelCase = super().__call__(*A_ , **A_ )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
UpperCAmelCase = "top_k" not in kwargs
if isinstance(args[0] , A_ ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def _UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : Tuple , **lowerCAmelCase__ : Optional[Any] ) -> Dict[str, GenericTensor]:
UpperCAmelCase = self.framework
if isinstance(A_ , A_ ):
return self.tokenizer(**A_ , return_tensors=A_ , **A_ )
elif isinstance(A_ , A_ ) and len(A_ ) == 1 and isinstance(inputs[0] , A_ ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=A_ , **A_ )
elif isinstance(A_ , A_ ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
return self.tokenizer(A_ , return_tensors=A_ , **A_ )
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : Optional[Any] ) -> Optional[int]:
return self.model(**A_ )
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : Tuple=1 , lowerCAmelCase__ : Optional[Any]=True ) -> Union[str, Any]:
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
UpperCAmelCase = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
UpperCAmelCase = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , "function_to_apply" ) and function_to_apply is None:
UpperCAmelCase = self.model.config.function_to_apply
else:
UpperCAmelCase = ClassificationFunction.NONE
UpperCAmelCase = model_outputs["logits"][0]
UpperCAmelCase = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
UpperCAmelCase = sigmoid(A_ )
elif function_to_apply == ClassificationFunction.SOFTMAX:
UpperCAmelCase = softmax(A_ )
elif function_to_apply == ClassificationFunction.NONE:
UpperCAmelCase = outputs
else:
raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
UpperCAmelCase = [
{"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(A_ )
]
if not _legacy:
dict_scores.sort(key=lambda lowerCAmelCase__ : x["score"] , reverse=A_ )
if top_k is not None:
UpperCAmelCase = dict_scores[:top_k]
return dict_scores
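# End-user view of the pipeline implemented above (the model choice is left to
# the library default, an SST-2 sentiment checkpoint at the time of writing):
#   from transformers import pipeline
#   clf = pipeline("text-classification")
#   clf("This movie was great!")              # [{'label': 'POSITIVE', 'score': ...}]
#   clf("This movie was great!", top_k=None)  # scores for every label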
| 704 |
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _lowerCAmelCase( __A = 100 ):
UpperCAmelCase = 1
UpperCAmelCase = 2
for i in range(2 , max_n + 1 ):
UpperCAmelCase = pre_numerator
UpperCAmelCase = 2 * i // 3 if i % 3 == 0 else 1
UpperCAmelCase = cur_numerator
UpperCAmelCase = e_cont * pre_numerator + temp
return sum_digits(__A )
if __name__ == "__main__":
print(f"{solution() = }")
| 1 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCAmelCase__ = pytest.mark.integration
@require_faiss
class __magic_name__ ( __lowercase ):
def _UpperCamelCase ( self : Dict ) -> int:
UpperCAmelCase = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(lowerCAmelCase__ ) for x in np.arange(3_0 ).tolist()]} )
return dset
def _UpperCamelCase ( self : Tuple ) -> Any:
import faiss
UpperCAmelCase = self._create_dummy_dataset()
UpperCAmelCase = dset.map(
lambda lowerCAmelCase__ , lowerCAmelCase__ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ )
UpperCAmelCase = dset.add_faiss_index("vecs" , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT )
UpperCAmelCase = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def _UpperCamelCase ( self : int ) -> List[str]:
import faiss
UpperCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
UpperCAmelCase = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
import faiss
UpperCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCAmelCase__ ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
UpperCAmelCase = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
UpperCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(lowerCAmelCase__ , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def _UpperCamelCase ( self : Optional[Any] ) -> int:
from elasticsearch import Elasticsearch
UpperCAmelCase = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
UpperCAmelCase = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 3_0 )
UpperCAmelCase = {"hits": {"hits": [{"_score": 1, "_id": 2_9}]}}
UpperCAmelCase = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=lowerCAmelCase__ )
UpperCAmelCase = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class __magic_name__ ( __lowercase ):
def _UpperCamelCase ( self : Optional[int] ) -> str:
import faiss
UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 1_0 )
# single query
UpperCAmelCase = np.zeros(5 , dtype=np.floataa )
UpperCAmelCase = 1
UpperCAmelCase = index.search(lowerCAmelCase__ )
self.assertRaises(lowerCAmelCase__ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
UpperCAmelCase = np.eye(5 , dtype=np.floataa )[::-1]
UpperCAmelCase = index.search_batch(lowerCAmelCase__ )
self.assertRaises(lowerCAmelCase__ , index.search_batch , queries[0] )
UpperCAmelCase = [scores[0] for scores in total_scores]
UpperCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase__ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowerCAmelCase__ )
def _UpperCamelCase ( self : List[Any] ) -> Any:
import faiss
UpperCAmelCase = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
UpperCAmelCase = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowerCAmelCase__ ):
UpperCAmelCase = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def _UpperCamelCase ( self : List[str] ) -> Optional[Any]:
import faiss
UpperCAmelCase = faiss.IndexFlat(5 )
UpperCAmelCase = FaissIndex(custom_index=lowerCAmelCase__ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _UpperCamelCase ( self : Any ) -> Any:
import faiss
UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCAmelCase__ ) as tmp_file:
index.save(tmp_file.name )
UpperCAmelCase = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
UpperCAmelCase = np.zeros(5 , dtype=np.floataa )
UpperCAmelCase = 1
UpperCAmelCase = index.search(lowerCAmelCase__ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _lowerCAmelCase( __A ):
import faiss
UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
UpperCAmelCase = "index.faiss"
UpperCAmelCase = F"mock://{index_name}"
index.save(_lowerCamelCase , storage_options=mockfs.storage_options )
UpperCAmelCase = FaissIndex.load(_lowerCamelCase , storage_options=mockfs.storage_options )
UpperCAmelCase = np.zeros(5 , dtype=np.floataa )
UpperCAmelCase = 1
UpperCAmelCase = index.search(_lowerCamelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __magic_name__ ( __lowercase ):
def _UpperCamelCase ( self : List[str] ) -> str:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
UpperCAmelCase = Elasticsearch()
UpperCAmelCase = {"acknowledged": True}
UpperCAmelCase = ElasticSearchIndex(es_client=lowerCAmelCase__ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
UpperCAmelCase = "foo"
UpperCAmelCase = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
UpperCAmelCase = index.search(lowerCAmelCase__ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
UpperCAmelCase = "foo"
UpperCAmelCase = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
UpperCAmelCase = index.search(lowerCAmelCase__ , request_timeout=3_0 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
UpperCAmelCase = ["foo", "bar", "foobar"]
UpperCAmelCase = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
UpperCAmelCase = index.search_batch(lowerCAmelCase__ )
UpperCAmelCase = [scores[0] for scores in total_scores]
UpperCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase__ ) , 0 )
self.assertListEqual([1, 1, 1] , lowerCAmelCase__ )
# batched queries with timeout
UpperCAmelCase = ["foo", "bar", "foobar"]
UpperCAmelCase = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
UpperCAmelCase = index.search_batch(lowerCAmelCase__ , request_timeout=3_0 )
UpperCAmelCase = [scores[0] for scores in total_scores]
UpperCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase__ ) , 0 )
self.assertListEqual([1, 1, 1] , lowerCAmelCase__ )
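# The core FaissIndex round trip the tests above cover, in brief:
#   index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
#   index.add_vectors(np.eye(5, dtype=np.float32))
#   q = np.zeros(5, dtype=np.float32); q[1] = 1
#   scores, ids = index.search(q)   # ids[0] == 1, as asserted above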
| 705 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 1 | 0 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __magic_name__ ( _snake_case ):
UpperCAmelCase = ["""image_processor""", """tokenizer"""]
UpperCAmelCase = """FlavaImageProcessor"""
UpperCAmelCase = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Dict , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Dict=None , **lowerCAmelCase__ : Any ) -> str:
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCamelCase , )
UpperCAmelCase = kwargs.pop("feature_extractor" )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase = self.image_processor
def __call__( self : List[str] , lowerCAmelCase__ : Optional[ImageInput] = None , lowerCAmelCase__ : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCAmelCase__ : Union[bool, str, TruncationStrategy] = False , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , **lowerCAmelCase__ : Optional[int] , ) -> Optional[int]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
UpperCAmelCase = self.tokenizer(
text=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
if images is not None:
UpperCAmelCase = self.image_processor(
__UpperCamelCase , return_image_mask=__UpperCamelCase , return_codebook_pixels=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
if text is not None and images is not None:
encoding.update(__UpperCamelCase )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCamelCase ) , tensor_type=__UpperCamelCase )
def _UpperCamelCase ( self : Any , *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def _UpperCamelCase ( self : Optional[Any] , *lowerCAmelCase__ : Any , **lowerCAmelCase__ : Optional[Any] ) -> Any:
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@property
def _UpperCamelCase ( self : str ) -> Any:
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _UpperCamelCase ( self : int ) -> Optional[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCamelCase , )
return self.image_processor_class
@property
def _UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCamelCase , )
return self.image_processor
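# Typical call pattern for the processor above (checkpoint name assumed):
#   from transformers import FlavaProcessor
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(text=["a photo of a cat"], images=image,
#                      return_tensors="pt", padding=True)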
| 706 |
import numpy
# List of input, output pairs
lowerCAmelCase__ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
lowerCAmelCase__ = (((515, 22, 13), 555), ((61, 35, 49), 150))
lowerCAmelCase__ = [2, 4, 1, 5]
lowerCAmelCase__ = len(train_data)
lowerCAmelCase__ = 0.0_0_9
def _lowerCAmelCase( __A , __A="train" ):
return calculate_hypothesis_value(__A , __A ) - output(
__A , __A )
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
for i in range(len(__A ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _lowerCAmelCase( __A , __A ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _lowerCAmelCase( __A , __A ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _lowerCAmelCase( __A , __A=m ):
UpperCAmelCase = 0
for i in range(__A ):
if index == -1:
summation_value += _error(__A )
else:
summation_value += _error(__A ) * train_data[i][0][index]
return summation_value
def _lowerCAmelCase( __A ):
UpperCAmelCase = summation_of_cost_derivative(__A , __A ) / m
return cost_derivative_value
def _lowerCAmelCase( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
UpperCAmelCase = 0.000002
UpperCAmelCase = 0
UpperCAmelCase = 0
while True:
j += 1
UpperCAmelCase = [0, 0, 0, 0]
for i in range(0 , len(__A ) ):
UpperCAmelCase = get_cost_derivative(i - 1 )
UpperCAmelCase = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
__A , __A , atol=__A , rtol=__A , ):
break
UpperCAmelCase = temp_parameter_vector
print(("Number of iterations:", j) )
def _lowerCAmelCase( ):
for i in range(len(__A ) ):
print(("Actual output value:", output(__A , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(__A , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
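# Vectorized restatement of the update rule above, as a sketch: X is a design
# matrix with a leading column of ones, y the targets; defaults mirror the
# LEARNING_RATE and tolerance used in run_gradient_descent().
def gradient_descent_vectorized(X, y, lr=0.009, atol=0.000002, max_iter=100_000):
    theta = numpy.zeros(X.shape[1])
    for _ in range(max_iter):
        grad = X.T @ (X @ theta - y) / len(y)   # gradient of the mean squared error / 2
        new_theta = theta - lr * grad
        if numpy.allclose(theta, new_theta, atol=atol, rtol=0):
            return new_theta
        theta = new_theta
    return theta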
| 1 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
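# The lazy-module pattern above defers the heavy torch/vision imports until an attribute
# is first accessed. A quick illustration (module path assumed to be the installed package):
import importlib

mask2former = importlib.import_module("transformers.models.mask2former")
config_cls = mask2former.Mask2FormerConfig  # the real import happens here, on attribute access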
| 707 |
def binary_exponentiation(a, n, mod):
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        # odd exponent: peel off one factor of a
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        # even exponent: square the result for half the exponent
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 701

a = 1000000000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
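# Cross-check with Python's built-in pow(), which performs the same O(log n) modular
# exponentiation natively and (since Python 3.8) can compute modular inverses directly:
assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p)  # Fermat inverse of b mod p
assert pow(b, -1, p) == pow(b, p - 2, p)  # built-in modular inverse, valid because p is prime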
| 1 | 0 |
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    """
    Formula for amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate of interest per month
    and n is the number of payments
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
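# Worked example (loan figures are illustrative): 25,000 borrowed at 8% per annum over
# 3 years means a monthly rate of 0.08 / 12 applied over 36 payments.
emi = equated_monthly_installments(25000, 0.08, 3)
print(f"{emi:.2f}")  # -> 783.41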
| 708 |
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encodes the given word into the Baconian cipher (five A/B symbols per letter)."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    """Decodes Baconian cipher text back into plain text, five symbols at a time."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
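# Round-trip demo of the two functions above (the sample string is illustrative):
ciphertext = encode("hello world")
print(ciphertext)  # five A/B symbols per letter, words separated by spaces
print(decode(ciphertext))  # -> "hello world"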
| 1 | 0 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
def __init__( self : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : int=1_3 , lowerCAmelCase__ : int=6_4 , lowerCAmelCase__ : Union[str, Any]=3 , lowerCAmelCase__ : List[Any]=[1_6, 4_8, 9_6] , lowerCAmelCase__ : Any=[1, 3, 6] , lowerCAmelCase__ : int=[1, 2, 1_0] , lowerCAmelCase__ : Union[str, Any]=[7, 3, 3] , lowerCAmelCase__ : List[str]=[4, 2, 2] , lowerCAmelCase__ : Optional[int]=[2, 1, 1] , lowerCAmelCase__ : Tuple=[2, 2, 2] , lowerCAmelCase__ : str=[False, False, True] , lowerCAmelCase__ : Optional[int]=[0.0, 0.0, 0.0] , lowerCAmelCase__ : int=0.02 , lowerCAmelCase__ : List[str]=1e-1_2 , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : str=2 , ) -> Optional[int]:
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_sizes
UpperCAmelCase = patch_stride
UpperCAmelCase = patch_padding
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = num_labels
UpperCAmelCase = num_channels
UpperCAmelCase = embed_dim
UpperCAmelCase = num_heads
UpperCAmelCase = stride_kv
UpperCAmelCase = depth
UpperCAmelCase = cls_token
UpperCAmelCase = attention_drop_rate
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
def _UpperCamelCase ( self : Any ) -> List[str]:
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self : List[str] ) -> List[str]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : List[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict ) -> Optional[Any]:
UpperCAmelCase = CvtModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase = model(lowerCamelCase_ )
UpperCAmelCase = (self.image_size, self.image_size)
UpperCAmelCase , UpperCAmelCase = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _UpperCamelCase ( self : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] ) -> Tuple:
UpperCAmelCase = self.num_labels
UpperCAmelCase = CvtForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : List[str] ) -> str:
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
def _UpperCamelCase ( self : Any ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCamelCase ( self : int ) -> Tuple:
return
@unittest.skip(reason="Cvt does not output attentions" )
def _UpperCamelCase ( self : Tuple ) -> Optional[int]:
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def _UpperCamelCase ( self : Any ) -> List[str]:
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def _UpperCamelCase ( self : Tuple ) -> Tuple:
pass
def _UpperCamelCase ( self : Dict ) -> int:
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCamelCase_ )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def _UpperCamelCase ( self : str ) -> int:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def _UpperCamelCase ( self : str ) -> Tuple:
def check_hidden_states_output(lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str ):
UpperCAmelCase = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
UpperCAmelCase = outputs.hidden_states
UpperCAmelCase = len(self.model_tester.depth )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def _UpperCamelCase ( self : int ) -> Tuple:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _UpperCamelCase ( self : Any ) -> Union[str, Any]:
pass
@slow
def _UpperCamelCase ( self : Union[str, Any] ) -> int:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = CvtModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def _lowerCAmelCase( ):
UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _UpperCamelCase ( self : str ) -> str:
UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase_ )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowerCamelCase_ , return_tensors="pt" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
UpperCAmelCase = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) )
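# Minimal standalone inference sketch mirroring the integration test above; the checkpoint
# is the first entry of CVT_PRETRAINED_MODEL_ARCHIVE_LIST and the image path is the same
# COCO fixture the test uses.
def _cvt_inference_demo():
    model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    processor = AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    with torch.no_grad():
        logits = model(**processor(images=image, return_tensors="pt")).logits
    print(model.config.id2label[logits.argmax(-1).item()])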
| 709 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase__ = {"UserAgent": UserAgent().random}
def _lowerCAmelCase( __A ):
UpperCAmelCase = script.contents[0]
UpperCAmelCase = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information"""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__( self : Tuple ) -> str:
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self : Optional[int] ) -> str:
return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def _lowerCAmelCase( __A = "github" ):
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
UpperCAmelCase = InstagramUser(__A )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __A )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(f"{instagram_user.number_of_posts = }")
print(f"{instagram_user.number_of_followers = }")
print(f"{instagram_user.number_of_followings = }")
print(f"{instagram_user.email = }")
print(f"{instagram_user.website = }")
print(f"{instagram_user.profile_picture_url = }")
print(f"{instagram_user.is_verified = }")
print(f"{instagram_user.is_private = }")
| 1 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
@property
def _UpperCamelCase ( self : List[str] ) -> List[str]:
return 3_2
@property
def _UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
return 3_2
@property
def _UpperCamelCase ( self : List[Any] ) -> Dict:
return self.time_input_dim
@property
def _UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def _UpperCamelCase ( self : Optional[int] ) -> List[Any]:
return 1_0_0
@property
def _UpperCamelCase ( self : Dict ) -> Tuple:
UpperCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def _UpperCamelCase ( self : Union[str, Any] ) -> str:
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(UpperCamelCase__ )
@property
def _UpperCamelCase ( self : Any ) -> Tuple:
torch.manual_seed(0 )
UpperCAmelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , )
return CLIPVisionModelWithProjection(UpperCamelCase__ )
@property
def _UpperCamelCase ( self : Any ) -> int:
torch.manual_seed(0 )
UpperCAmelCase = {
"clip_embeddings_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"cross_attention_dim": self.cross_attention_dim,
}
UpperCAmelCase = UnCLIPTextProjModel(**UpperCamelCase__ )
return model
@property
def _UpperCamelCase ( self : Optional[Any] ) -> Any:
torch.manual_seed(0 )
UpperCAmelCase = {
"sample_size": 3_2,
# RGB in channels
"in_channels": 3,
# Out channels is double in channels because predicts mean and variance
"out_channels": 6,
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": "identity",
}
        model = UNet2DConditionModel(**UpperCamelCase__ )
return model
@property
def _UpperCamelCase ( self : Optional[int] ) -> str:
return {
"sample_size": 6_4,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
torch.manual_seed(0 )
        model = UNet2DModel(**self.dummy_super_res_kwargs )
return model
@property
def _UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
torch.manual_seed(1 )
        model = UNet2DModel(**self.dummy_super_res_kwargs )
return model
def _UpperCamelCase ( self : Optional[int] ) -> Any:
UpperCAmelCase = self.dummy_decoder
UpperCAmelCase = self.dummy_text_proj
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = self.dummy_tokenizer
UpperCAmelCase = self.dummy_super_res_first
UpperCAmelCase = self.dummy_super_res_last
UpperCAmelCase = UnCLIPScheduler(
variance_type="learned_range" , prediction_type="epsilon" , num_train_timesteps=1_0_0_0 , )
UpperCAmelCase = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="epsilon" , num_train_timesteps=1_0_0_0 , )
UpperCAmelCase = CLIPImageProcessor(crop_size=3_2 , size=3_2 )
UpperCAmelCase = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : str=0 , lowerCAmelCase__ : Dict=True ) -> Union[str, Any]:
UpperCAmelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith("mps" ):
UpperCAmelCase = torch.manual_seed(UpperCamelCase__ )
else:
UpperCAmelCase = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
if pil_image:
UpperCAmelCase = input_image * 0.5 + 0.5
UpperCAmelCase = input_image.clamp(0 , 1 )
UpperCAmelCase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCAmelCase = DiffusionPipeline.numpy_to_pil(UpperCamelCase__ )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase = "cpu"
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**UpperCamelCase__ )
UpperCAmelCase = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCAmelCase = self.get_dummy_inputs(UpperCamelCase__ , pil_image=UpperCamelCase__ )
UpperCAmelCase = pipe(**UpperCamelCase__ )
UpperCAmelCase = output.images
UpperCAmelCase = self.get_dummy_inputs(UpperCamelCase__ , pil_image=UpperCamelCase__ )
UpperCAmelCase = pipe(
**UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCAmelCase = np.array(
[
0.9_997,
0.0_002,
0.9_997,
0.9_997,
0.9_969,
0.0_023,
0.9_997,
0.9_969,
0.9_970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCamelCase ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase = "cpu"
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**UpperCamelCase__ )
UpperCAmelCase = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCAmelCase = self.get_dummy_inputs(UpperCamelCase__ , pil_image=UpperCamelCase__ )
UpperCAmelCase = pipe(**UpperCamelCase__ )
UpperCAmelCase = output.images
UpperCAmelCase = self.get_dummy_inputs(UpperCamelCase__ , pil_image=UpperCamelCase__ )
UpperCAmelCase = pipe(
**UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCAmelCase = np.array([0.9_997, 0.0_003, 0.9_997, 0.9_997, 0.9_970, 0.0_024, 0.9_997, 0.9_971, 0.9_971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCamelCase ( self : str ) -> str:
UpperCAmelCase = "cpu"
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**UpperCamelCase__ )
UpperCAmelCase = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCAmelCase = self.get_dummy_inputs(UpperCamelCase__ , pil_image=UpperCamelCase__ )
UpperCAmelCase = [
pipeline_inputs["image"],
pipeline_inputs["image"],
]
UpperCAmelCase = pipe(**UpperCamelCase__ )
UpperCAmelCase = output.images
UpperCAmelCase = self.get_dummy_inputs(UpperCamelCase__ , pil_image=UpperCamelCase__ )
UpperCAmelCase = [
tuple_pipeline_inputs["image"],
tuple_pipeline_inputs["image"],
]
UpperCAmelCase = pipe(
**UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 6_4, 6_4, 3)
UpperCAmelCase = np.array(
[
0.9_997,
0.9_989,
0.0_008,
0.0_021,
0.9_960,
0.0_018,
0.0_014,
0.0_002,
0.9_933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCamelCase ( self : int ) -> List[Any]:
UpperCAmelCase = torch.device("cpu" )
        class DummyScheduler:
            init_noise_sigma = 1
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**UpperCamelCase__ )
UpperCAmelCase = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCAmelCase = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
UpperCAmelCase = pipe.decoder.dtype
UpperCAmelCase = 1
UpperCAmelCase = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
UpperCAmelCase = pipe.prepare_latents(
UpperCamelCase__ , dtype=UpperCamelCase__ , device=UpperCamelCase__ , generator=UpperCamelCase__ , latents=UpperCamelCase__ , scheduler=DummyScheduler() )
UpperCAmelCase = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
UpperCAmelCase = pipe.prepare_latents(
UpperCamelCase__ , dtype=UpperCamelCase__ , device=UpperCamelCase__ , generator=UpperCamelCase__ , latents=UpperCamelCase__ , scheduler=DummyScheduler() )
UpperCAmelCase = self.get_dummy_inputs(UpperCamelCase__ , pil_image=UpperCamelCase__ )
UpperCAmelCase = pipe(
**UpperCamelCase__ , decoder_latents=UpperCamelCase__ , super_res_latents=UpperCamelCase__ ).images
UpperCAmelCase = self.get_dummy_inputs(UpperCamelCase__ , pil_image=UpperCamelCase__ )
# Don't pass image, instead pass embedding
UpperCAmelCase = pipeline_inputs.pop("image" )
UpperCAmelCase = pipe.image_encoder(UpperCamelCase__ ).image_embeds
UpperCAmelCase = pipe(
**UpperCamelCase__ , decoder_latents=UpperCamelCase__ , super_res_latents=UpperCamelCase__ , image_embeddings=UpperCamelCase__ , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def _UpperCamelCase ( self : List[str] ) -> str:
UpperCAmelCase = torch_device == "cpu"
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
UpperCAmelCase = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCamelCase__ , expected_max_diff=UpperCamelCase__ )
@skip_mps
def _UpperCamelCase ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase = torch_device == "cpu"
UpperCAmelCase = True
UpperCAmelCase = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
self._test_inference_batch_single_identical(
test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , additional_params_copy_to_batched_inputs=UpperCamelCase__ , )
def _UpperCamelCase ( self : List[str] ) -> Any:
UpperCAmelCase = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
UpperCAmelCase = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=UpperCamelCase__ , additional_params_copy_to_batched_inputs=UpperCamelCase__ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=UpperCamelCase__ )
@skip_mps
def _UpperCamelCase ( self : Tuple ) -> List[Any]:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
return super().test_save_load_local()
@skip_mps
def _UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
def _UpperCamelCase ( self : str ) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : List[str] ) -> int:
UpperCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" )
UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/unclip/karlo_v1_alpha_cat_variation_fp16.npy" )
UpperCAmelCase = UnCLIPImageVariationPipeline.from_pretrained(
"kakaobrain/karlo-v1-alpha-image-variations" , torch_dtype=torch.floataa )
UpperCAmelCase = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase = pipeline(
UpperCamelCase__ , generator=UpperCamelCase__ , output_type="np" , )
UpperCAmelCase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ , 1_5 )
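# Minimal end-to-end usage of the pipeline exercised above; the checkpoint and image URL
# come from the integration test, while the device and seed choices are illustrative.
def _unclip_variation_demo():
    pipe = UnCLIPImageVariationPipeline.from_pretrained(
        "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
    ).to("cuda")
    image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
    )
    variation = pipe(image, generator=torch.Generator("cpu").manual_seed(0)).images[0]
    variation.save("cat_variation.png")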
| 710 |
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None):
    """
    Schur complement of the symmetric block matrix [[A, B], [B.T, C]]:
    returns S = C - B.T @ A^{-1} @ B.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        # det([[A, B], [B.T, C]]) == det(A) * det(S)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
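# The determinant identity verified above, det([[A, B], [B.T, C]]) = det(A) * det(S),
# holds for any conforming blocks; a second, smaller check with made-up matrices:
def _schur_determinant_demo() -> None:
    a = np.array([[4.0, 1.0], [1.0, 3.0]])
    b = np.array([[1.0], [2.0]])
    c = np.array([[5.0]])
    s = schur_complement(a, b, c)
    m = np.block([[a, b], [b.T, c]])
    assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))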
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 1 | 0 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 711 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    # Grab the "<label>" part out of "path/to/<label>_<number>.jpg"
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
# Initialize accelerator
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config["lr"]
UpperCAmelCase = int(config["num_epochs"] )
UpperCAmelCase = int(config["seed"] )
UpperCAmelCase = int(config["batch_size"] )
UpperCAmelCase = config["image_size"]
if not isinstance(__A , (list, tuple) ):
UpperCAmelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , "isdigit" ):
if args.checkpointing_steps == "epoch":
UpperCAmelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
UpperCAmelCase = int(args.checkpointing_steps )
else:
raise ValueError(
F"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." )
else:
UpperCAmelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
UpperCAmelCase = os.path.split(__A )[-1].split("." )[0]
accelerator.init_trackers(__A , __A )
# Grab all the image filenames
UpperCAmelCase = [os.path.join(args.data_dir , __A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
UpperCAmelCase = [extract_label(__A ) for fname in file_names]
UpperCAmelCase = list(set(__A ) )
id_to_label.sort()
UpperCAmelCase = {lbl: i for i, lbl in enumerate(__A )}
# Set the seed before splitting the data.
np.random.seed(__A )
torch.manual_seed(__A )
torch.cuda.manual_seed_all(__A )
# Split our filenames between train and validation
UpperCAmelCase = np.random.permutation(len(__A ) )
UpperCAmelCase = int(0.8 * len(__A ) )
UpperCAmelCase = random_perm[:cut]
UpperCAmelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
UpperCAmelCase = Compose([RandomResizedCrop(__A , scale=(0.5, 1.0) ), ToTensor()] )
UpperCAmelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=__A , label_to_id=__A )
# For evaluation, we use a deterministic Resize
UpperCAmelCase = Compose([Resize(__A ), ToTensor()] )
UpperCAmelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=__A , label_to_id=__A )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
# Instantiate optimizer
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
UpperCAmelCase = OneCycleLR(optimizer=__A , max_lr=__A , epochs=__A , steps_per_epoch=len(__A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
__A , __A , __A , __A , __A )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase = 0
# We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"Resumed from checkpoint: {args.resume_from_checkpoint}" )
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
UpperCAmelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
UpperCAmelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
UpperCAmelCase = os.path.splitext(__A )[0]
if "epoch" in training_difference:
UpperCAmelCase = int(training_difference.replace("epoch_" , "" ) ) + 1
UpperCAmelCase = None
else:
UpperCAmelCase = int(training_difference.replace("step_" , "" ) )
UpperCAmelCase = resume_step // len(__A )
resume_step -= starting_epoch * len(__A )
# Now we train the model
for epoch in range(__A , __A ):
model.train()
if args.with_tracking:
UpperCAmelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
UpperCAmelCase = accelerator.skip_first_batches(__A , __A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
UpperCAmelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch["image"] - mean) / std
UpperCAmelCase = model(__A )
UpperCAmelCase = torch.nn.functional.cross_entropy(__A , batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__A , __A ):
UpperCAmelCase = F"step_{overall_step}"
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
model.eval()
UpperCAmelCase = 0
UpperCAmelCase = 0
for step, batch in enumerate(__A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch["image"] - mean) / std
with torch.no_grad():
UpperCAmelCase = model(__A )
UpperCAmelCase = outputs.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["label"]) )
UpperCAmelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
UpperCAmelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}: {100 * eval_metric:.2f}" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(__A ),
"epoch": epoch,
} , step=__A , )
if checkpointing_steps == "epoch":
UpperCAmelCase = F"epoch_{epoch}"
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
if args.with_tracking:
accelerator.end_training()
def main():
UpperCAmelCase = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir" , required=__A , help="The data folder on disk." )
parser.add_argument("--fp16" , action="store_true" , help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision" , type=__A , default=__A , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps" , type=__A , default=__A , help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." , )
parser.add_argument(
"--output_dir" , type=__A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=__A , default=__A , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=__A , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
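# Typical ways to run the script above (paths and flags are illustrative):
#   accelerate launch cv_example.py --data_dir ./images --checkpointing_steps epoch
# or, programmatically, reusing the same default hyper-parameters:
#   import argparse
#   args = argparse.Namespace(
#       data_dir="./images", mixed_precision=None, cpu=True, checkpointing_steps=None,
#       output_dir=".", resume_from_checkpoint=None, with_tracking=False, project_dir="logs",
#   )
#   training_function({"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}, args)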
| 1 | 0 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
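# Example invocation (all paths are placeholders):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin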
| 712 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
lowerCAmelCase__ = ""
lowerCAmelCase__ = ""
lowerCAmelCase__ = ""
lowerCAmelCase__ = 1 # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir, img_dir):
    """Collect image paths and their YOLO-format bounding boxes from a label folder."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list, anno_list, flip_type=1):
    """Flip every image and mirror its bounding-box centers accordingly."""
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:  # horizontal flip: mirror the x center
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip: mirror the y center
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char=32):
    """Generate a random lowercase/digit string of the given length."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 1 | 0 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text):
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `\'en\'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `\'test\'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
    args = parser.parse_args()
main(args)
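# Example invocation of the evaluation script above (model and dataset ids are illustrative):
#   python eval.py --model_id some-org/wav2vec2-large-xlsr \
#       --dataset mozilla-foundation/common_voice_7_0 --config pt --split test --log_outputs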
| 713 |
def remove_digit(num):
    """
    Return the largest number that can be formed by deleting exactly one digit
    from the absolute value of the given integer.
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(transposition)) for transposition in num_transpositions
    )
if __name__ == "__main__":
__import__("doctest").testmod()
| 1 | 0 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowerCAmelCase__ = "\\n Text data.\n Second line of data."
lowerCAmelCase__ = "file"
@pytest.fixture(scope="session" )
def _lowerCAmelCase( __A ):
UpperCAmelCase = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
UpperCAmelCase = bytes(UpperCAmelCase__ , "utf-8" )
with zstd.open(UpperCAmelCase__ , "wb" ) as f:
f.write(UpperCAmelCase__ )
return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def _lowerCAmelCase( __A , __A , __A , __A , __A , __A ):
UpperCAmelCase = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
UpperCAmelCase = input_paths[compression_format]
UpperCAmelCase = tmp_path / "cache"
UpperCAmelCase = DownloadConfig(cache_dir=UpperCAmelCase__ , extract_compressed_file=UpperCAmelCase__ )
UpperCAmelCase = cached_path(UpperCAmelCase__ , download_config=UpperCAmelCase__ )
with open(UpperCAmelCase__ ) as f:
UpperCAmelCase = f.read()
with open(UpperCAmelCase__ ) as f:
UpperCAmelCase = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def _lowerCAmelCase( __A , __A , __A , __A , __A ):
UpperCAmelCase = "custom_cache"
UpperCAmelCase = "custom_extracted_dir"
UpperCAmelCase = tmp_path / "custom_extracted_path"
if default_extracted:
UpperCAmelCase = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , UpperCAmelCase__ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(UpperCAmelCase__ ) )
UpperCAmelCase = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
UpperCAmelCase = xz_file
UpperCAmelCase = (
DownloadConfig(extract_compressed_file=UpperCAmelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=UpperCAmelCase__ )
)
UpperCAmelCase = cached_path(UpperCAmelCase__ , download_config=UpperCAmelCase__ )
assert Path(UpperCAmelCase__ ).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def _lowerCAmelCase( __A ):
UpperCAmelCase = get_from_cache(F"tmp://{tmpfs_file}" )
with open(UpperCAmelCase__ ) as f:
UpperCAmelCase = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , UpperCAmelCase__ )
def _lowerCAmelCase( ):
with pytest.raises(UpperCAmelCase__ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , UpperCAmelCase__ )
def _lowerCAmelCase( __A ):
UpperCAmelCase = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(UpperCAmelCase__ ):
http_get("https://huggingface.co" , temp_file=UpperCAmelCase__ )
with pytest.raises(UpperCAmelCase__ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , UpperCAmelCase__ )
def _lowerCAmelCase( __A ):
UpperCAmelCase = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(UpperCAmelCase__ ):
ftp_get("ftp://huggingface.co" , temp_file=UpperCAmelCase__ )
with pytest.raises(UpperCAmelCase__ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , UpperCAmelCase__ )
def _lowerCAmelCase( __A ):
UpperCAmelCase = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(UpperCAmelCase__ ):
fsspec_get("s3://huggingface.co" , temp_file=UpperCAmelCase__ )
with pytest.raises(UpperCAmelCase__ ):
fsspec_head("s3://huggingface.co" )
| 714 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = 50 # max width of layer names
lowerCAmelCase__ = 70 # max width of quantizer names
def _lowerCAmelCase( __A ):
UpperCAmelCase = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=__A , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=__A , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=__A , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=__A , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=__A , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=__A , type=__A , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=__A , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def _lowerCAmelCase( __A ):
if args.calibrator == "max":
UpperCAmelCase = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
UpperCAmelCase = "histogram"
elif args.calibrator == "mse":
UpperCAmelCase = "histogram"
else:
raise ValueError(F"Invalid calibrator {args.calibrator}" )
UpperCAmelCase = QuantDescriptor(num_bits=args.aprec , calib_method=__A )
UpperCAmelCase = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(__A )
quant_nn.QuantLinear.set_default_quant_desc_weight(__A )
def _lowerCAmelCase( __A , __A , __A=False , __A=False ):
logger.info("Configuring Model for Quantization" )
logger.info(F"using quantization package {pytorch_quantization.__file__}" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(__A , ["embeddings"] , which="weight" , _disabled=__A )
if args.quant_disable:
set_quantizer_by_name(__A , [""] , _disabled=__A )
if args.quant_disable_keyword:
set_quantizer_by_name(__A , args.quant_disable_keyword , _disabled=__A )
if args.quant_disable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=__A )
if args.quant_enable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=__A )
if args.recalibrate_weights:
recalibrate_weights(__A )
if args.fuse_qkv:
fuse_qkv(__A , __A )
if args.clip_gelu:
clip_gelu(__A , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(__A )
def _lowerCAmelCase( __A ):
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"{name:80}: {module}" )
def _lowerCAmelCase( __A , __A ):
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(__A )
def _lowerCAmelCase( __A , __A ):
def fusea(__A , __A , __A ):
for mod in [qq, qk, qv]:
if not hasattr(__A , "_amax" ):
print(" WARNING: NO AMAX BUFFER" )
return
UpperCAmelCase = qq._amax.detach().item()
UpperCAmelCase = qk._amax.detach().item()
UpperCAmelCase = qv._amax.detach().item()
UpperCAmelCase = max(__A , __A , __A )
qq._amax.fill_(__A )
qk._amax.fill_(__A )
qv._amax.fill_(__A )
logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(F"FUSE_QKV: {name:{name_width}}" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _lowerCAmelCase( __A , __A ):
for name, mod in model.named_modules():
if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=__A )
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
UpperCAmelCase = mod.weight.shape[0]
UpperCAmelCase = mod._weight_quantizer._amax.detach()
UpperCAmelCase = torch.ones(__A , dtype=amax.dtype , device=amax.device ) * amax
print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ):
            if not hasattr(mod._weight_quantizer , "_amax" ):
                print(F"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
UpperCAmelCase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
UpperCAmelCase = set(range(len(mod.weight.size() ) ) ) - axis_set
UpperCAmelCase = pytorch_quantization.utils.reduce_amax(mod.weight , axis=__A , keepdims=__A ).detach()
logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
UpperCAmelCase = amax
def _lowerCAmelCase( __A , __A=25 , __A=180 , __A=None ):
if ignore is None:
UpperCAmelCase = []
elif not isinstance(__A , __A ):
UpperCAmelCase = [ignore]
UpperCAmelCase = 0
for name, mod in model.named_modules():
if not hasattr(__A , "weight" ):
continue
UpperCAmelCase = max(__A , len(__A ) )
for name, mod in model.named_modules():
UpperCAmelCase = getattr(__A , "_input_quantizer" , __A )
UpperCAmelCase = getattr(__A , "_weight_quantizer" , __A )
if not hasattr(__A , "weight" ):
continue
if type(__A ) in ignore:
continue
if [True for s in ignore if type(__A ) is str and s in name]:
continue
UpperCAmelCase = F"Act:{input_q.extra_repr()}"
UpperCAmelCase = F"Wgt:{weight_q.extra_repr()}"
UpperCAmelCase = F"{name:{name_width}} {act_str} {wgt_str}"
if len(__A ) <= line_width:
logger.info(__A )
else:
logger.info(F"{name:{name_width}} {act_str}" )
logger.info(F"{' ':{name_width}} {wgt_str}" )
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
for name, mod in model.named_modules():
if isinstance(__A , pytorch_quantization.nn.TensorQuantizer ):
print(F"{name:80} {mod}" )
count += 1
print(F"{count} TensorQuantizers found in model" )
def _lowerCAmelCase( __A , __A , __A , __A , __A ):
UpperCAmelCase = getattr(__A , __A , __A )
if quantizer_mod is not None:
assert hasattr(__A , __A )
setattr(__A , __A , __A )
else:
logger.warning(F"{name} has no {quantizer}" )
def _lowerCAmelCase( __A , __A , __A="both" , **__A ):
UpperCAmelCase = F"Warning: changing {which} quantizers of {name:{qname_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
if which in ["input", "both"]:
set_quantizer(__A , __A , "_input_quantizer" , __A , __A )
if which in ["weight", "both"]:
set_quantizer(__A , __A , "_weight_quantizer" , __A , __A )
logger.info(__A )
def _lowerCAmelCase( __A , __A , **__A ):
for name, mod in model.named_modules():
if hasattr(__A , "_input_quantizer" ) or hasattr(__A , "_weight_quantizer" ):
for n in names:
if re.search(__A , __A ):
set_quantizers(__A , __A , **__A )
elif name.endswith("_quantizer" ):
for n in names:
if re.search(__A , __A ):
UpperCAmelCase = F"Warning: changing {name:{name_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
setattr(__A , __A , __A )
logger.info(__A )
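# Usage sketch mirroring the calls earlier in this module (the regex below is
# illustrative):
#     set_quantizer_by_name(model, ["layernorm"], _disabled=True)
# disables both the input and weight quantizers of every module whose name
# matches "layernorm", since `which` defaults to "both".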
| 1 | 0 |
from __future__ import annotations
from collections.abc import Iterator
class __magic_name__ :
def __init__( self : Union[str, Any] , lowerCAmelCase__ : int ) -> Union[str, Any]:
UpperCAmelCase = value
UpperCAmelCase = None
UpperCAmelCase = None
class __magic_name__ :
def __init__( self : Any , lowerCAmelCase__ : Optional[int] ) -> List[Any]:
UpperCAmelCase = tree
def _UpperCamelCase ( self : List[str] , lowerCAmelCase__ : Union[str, Any] ) -> int:
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : str ) -> Union[str, Any]:
yield self.depth_first_search(self.tree )
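# Illustrative behaviour: depth_first_search returns the sum of all node
# values, so for a root of 10 with children 5 and 3 the iterator yields 18.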
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
def _lowerCAmelCase( __A ):
assert column_title.isupper()
UpperCAmelCase = 0
UpperCAmelCase = len(__A ) - 1
UpperCAmelCase = 0
while index >= 0:
UpperCAmelCase = (ord(column_title[index] ) - 64) * pow(26 , __A )
answer += value
power += 1
index -= 1
return answer
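# Worked example: "AB" -> (ord("A") - 64) * 26**1 + (ord("B") - 64) * 26**0
# = 1 * 26 + 2 = 28; "ZZ" evaluates to 26 * 26 + 26 = 702.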
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | 0 |
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCAmelCase__ = None
try:
import msvcrt
except ImportError:
lowerCAmelCase__ = None
try:
import fcntl
except ImportError:
lowerCAmelCase__ = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCAmelCase__ = OSError
# Data
# ------------------------------------------------
lowerCAmelCase__ = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
lowerCAmelCase__ = """3.0.12"""
lowerCAmelCase__ = None
def _lowerCAmelCase( ):
global _logger
UpperCAmelCase = _logger or logging.getLogger(__name__ )
return _logger
class __magic_name__ ( __UpperCAmelCase ):
def __init__( self : Optional[Any] , lowerCAmelCase__ : int ) -> List[str]:
UpperCAmelCase = lock_file
return None
def __str__( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase = f"The file lock \'{self.lock_file}\' could not be acquired."
return temp
class __magic_name__ :
def __init__( self : List[str] , lowerCAmelCase__ : Any ) -> List[Any]:
UpperCAmelCase = lock
return None
def __enter__( self : Any ) -> Optional[Any]:
return self.lock
def __exit__( self : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict ) -> Optional[Any]:
self.lock.release()
return None
class __magic_name__ :
def __init__( self : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any]=-1 , lowerCAmelCase__ : Union[str, Any]=None ) -> Any:
UpperCAmelCase = max_filename_length if max_filename_length is not None else 2_5_5
# Hash the filename if it's too long
UpperCAmelCase = self.hash_filename_if_too_long(lowerCAmelCase__ , lowerCAmelCase__ )
# The path to the lock file.
UpperCAmelCase = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
        # This file descriptor is not None only while the object currently
        # holds the lock.
UpperCAmelCase = None
# The default timeout value.
UpperCAmelCase = timeout
# We use this lock primarily for the lock counter.
UpperCAmelCase = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
UpperCAmelCase = 0
return None
@property
def _UpperCamelCase ( self : Optional[Any] ) -> List[str]:
return self._lock_file
@property
def _UpperCamelCase ( self : str ) -> Optional[int]:
return self._timeout
@timeout.setter
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : List[str] ) -> List[str]:
UpperCAmelCase = float(lowerCAmelCase__ )
return None
def _UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
raise NotImplementedError()
def _UpperCamelCase ( self : Any ) -> Optional[Any]:
raise NotImplementedError()
@property
def _UpperCamelCase ( self : Any ) -> List[Any]:
return self._lock_file_fd is not None
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : str=0.05 ) -> Union[str, Any]:
if timeout is None:
UpperCAmelCase = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
UpperCAmelCase = id(self )
UpperCAmelCase = self._lock_file
UpperCAmelCase = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(f"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(lowerCAmelCase__ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
UpperCAmelCase = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : int=False ) -> Optional[int]:
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
UpperCAmelCase = id(self )
UpperCAmelCase = self._lock_file
logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
UpperCAmelCase = 0
logger().debug(f"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self : int ) -> List[Any]:
self.acquire()
return self
def __exit__( self : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : str ) -> List[Any]:
self.release()
return None
def __del__( self : List[Any] ) -> List[Any]:
self.release(force=lowerCAmelCase__ )
return None
def _UpperCamelCase ( self : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> Any:
UpperCAmelCase = os.path.basename(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > max_length and max_length > 0:
UpperCAmelCase = os.path.dirname(lowerCAmelCase__ )
UpperCAmelCase = str(hash(lowerCAmelCase__ ) )
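            # The pieces below add up to exactly max_length characters:
            # (max_length - len(hash) - 8) for the prefix, 3 for "...",
            # len(hash) for the hash itself and 5 for ".lock".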
UpperCAmelCase = filename[: max_length - len(lowerCAmelCase__ ) - 8] + "..." + hashed_filename + ".lock"
return os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
else:
return path
class __magic_name__ ( __UpperCAmelCase ):
def __init__( self : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int]=-1 , lowerCAmelCase__ : List[str]=None ) -> int:
from .file_utils import relative_to_absolute_path
super().__init__(lowerCAmelCase__ , timeout=lowerCAmelCase__ , max_filename_length=lowerCAmelCase__ )
UpperCAmelCase = "\\\\?\\" + relative_to_absolute_path(self.lock_file )
def _UpperCamelCase ( self : int ) -> Optional[int]:
UpperCAmelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
UpperCAmelCase = os.open(self._lock_file , lowerCAmelCase__ )
except OSError:
pass
else:
try:
msvcrt.locking(lowerCAmelCase__ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(lowerCAmelCase__ )
else:
UpperCAmelCase = fd
return None
def _UpperCamelCase ( self : List[Any] ) -> Dict:
UpperCAmelCase = self._lock_file_fd
UpperCAmelCase = None
msvcrt.locking(lowerCAmelCase__ , msvcrt.LK_UNLCK , 1 )
os.close(lowerCAmelCase__ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class __magic_name__ ( __UpperCAmelCase ):
def __init__( self : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict=-1 , lowerCAmelCase__ : Dict=None ) -> List[str]:
UpperCAmelCase = os.statvfs(os.path.dirname(lowerCAmelCase__ ) ).f_namemax
super().__init__(lowerCAmelCase__ , timeout=lowerCAmelCase__ , max_filename_length=lowerCAmelCase__ )
def _UpperCamelCase ( self : Any ) -> str:
UpperCAmelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
UpperCAmelCase = os.open(self._lock_file , lowerCAmelCase__ )
try:
fcntl.flock(lowerCAmelCase__ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(lowerCAmelCase__ )
else:
UpperCAmelCase = fd
return None
def _UpperCamelCase ( self : Tuple ) -> List[Any]:
UpperCAmelCase = self._lock_file_fd
UpperCAmelCase = None
fcntl.flock(lowerCAmelCase__ , fcntl.LOCK_UN )
os.close(lowerCAmelCase__ )
return None
class __magic_name__ ( __UpperCAmelCase ):
def _UpperCamelCase ( self : List[Any] ) -> str:
UpperCAmelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
UpperCAmelCase = os.open(self._lock_file , lowerCAmelCase__ )
except OSError:
pass
else:
UpperCAmelCase = fd
return None
def _UpperCamelCase ( self : List[Any] ) -> int:
os.close(self._lock_file_fd )
UpperCAmelCase = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowerCAmelCase__ = None
if msvcrt:
lowerCAmelCase__ = WindowsFileLock
elif fcntl:
lowerCAmelCase__ = UnixFileLock
else:
lowerCAmelCase__ = SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
| 716 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase__ = get_tests_dir("fixtures")
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase = mock.Mock()
UpperCAmelCase = 5_0_0
UpperCAmelCase = {}
UpperCAmelCase = HTTPError
UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=lowerCAmelCase__ ) as mock_head:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
        # This checks that we did call the fake head request
mock_head.assert_called()
def _UpperCamelCase ( self : List[Any] ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class __magic_name__ ( unittest.TestCase ):
@classmethod
def _UpperCamelCase ( cls : List[str] ) -> List[Any]:
UpperCAmelCase = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def _UpperCamelCase ( cls : Optional[int] ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def _UpperCamelCase ( self : Any ) -> Any:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase__ , repo_id="test-feature-extractor" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def _UpperCamelCase ( self : List[Any] ) -> Tuple:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase__ , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def _UpperCamelCase ( self : Dict ) -> List[str]:
CustomFeatureExtractor.register_for_auto_class()
UpperCAmelCase = CustomFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=lowerCAmelCase__ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 1 | 0 |
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _lowerCAmelCase( __A , __A , __A , __A , ):
UpperCAmelCase = coefficient_matrix.shape
UpperCAmelCase = constant_matrix.shape
if rowsa != colsa:
UpperCAmelCase = F"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"
raise ValueError(__A )
if colsa != 1:
UpperCAmelCase = F"Constant matrix must be nx1 but received {rowsa}x{colsa}"
raise ValueError(__A )
if rowsa != rowsa:
UpperCAmelCase = (
'''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
F"received {rowsa}x{colsa} and {rowsa}x{colsa}"
)
raise ValueError(__A )
if len(__A ) != rowsa:
UpperCAmelCase = (
'''Number of initial values must be equal to number of rows in coefficient '''
F"matrix but received {len(__A )} and {rowsa}"
)
raise ValueError(__A )
if iterations <= 0:
raise ValueError("Iterations must be at least 1" )
UpperCAmelCase = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
UpperCAmelCase = table.shape
strictly_diagonally_dominant(__A )
    # Iterate over the whole matrix for the given number of iterations
for _ in range(__A ):
UpperCAmelCase = []
for row in range(__A ):
UpperCAmelCase = 0
for col in range(__A ):
if col == row:
UpperCAmelCase = table[row][col]
elif col == cols - 1:
UpperCAmelCase = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
UpperCAmelCase = (temp + val) / denom
new_val.append(__A )
UpperCAmelCase = new_val
return [float(__A ) for i in new_val]
def _lowerCAmelCase( __A ):
UpperCAmelCase = table.shape
UpperCAmelCase = True
for i in range(0 , __A ):
UpperCAmelCase = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
return is_diagonally_dominant
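# Hedged usage sketch (values chosen so the matrix is strictly diagonally
# dominant; `jacobi_iteration_method` is an assumed, readable alias for the
# first function above):
#     coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
#     constant = np.array([[2.0], [-6.0]])
#     solution = jacobi_iteration_method(coefficient, constant, [0.5, -0.5], 3)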
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowerCAmelCase__ = "src/diffusers"
# Matches is_xxx_available()
lowerCAmelCase__ = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
lowerCAmelCase__ = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
lowerCAmelCase__ = "\n{0} = None\n"
lowerCAmelCase__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
lowerCAmelCase__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def _lowerCAmelCase( __A ):
UpperCAmelCase = _re_backend.findall(__A )
if len(__A ) == 0:
return None
return "_and_".join(__A )
def _lowerCAmelCase( ):
with open(os.path.join(__A , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.readlines()
# Get to the point we do the actual imports for type checking
UpperCAmelCase = 0
UpperCAmelCase = {}
# Go through the end of the file
while line_index < len(__A ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCAmelCase = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("else:" ):
line_index += 1
line_index += 1
UpperCAmelCase = []
# Until we unindent, add backend objects to the list
while line_index < len(__A ) and len(lines[line_index] ) > 1:
UpperCAmelCase = lines[line_index]
UpperCAmelCase = _re_single_line_import.search(__A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__A ) > 0:
UpperCAmelCase = objects
else:
line_index += 1
return backend_specific_objects
def _lowerCAmelCase( __A , __A ):
if name.isupper():
return DUMMY_CONSTANT.format(__A )
elif name.islower():
return DUMMY_FUNCTION.format(__A , __A )
else:
return DUMMY_CLASS.format(__A , __A )
def _lowerCAmelCase( __A=None ):
if backend_specific_objects is None:
UpperCAmelCase = read_init()
    # Special correspondence from backend to module name, as used in the function requires_modulename
UpperCAmelCase = {}
for backend, objects in backend_specific_objects.items():
UpperCAmelCase = "[" + ", ".join(F"\"{b}\"" for b in backend.split("_and_" ) ) + "]"
UpperCAmelCase = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__A , __A ) for o in objects] )
UpperCAmelCase = dummy_file
return dummy_files
def _lowerCAmelCase( __A=False ):
UpperCAmelCase = create_dummy_files()
    # Special correspondence from backend to shortcut, as used in utils/dummy_xxx_objects.py
UpperCAmelCase = {"torch": "pt"}
# Locate actual dummy modules and read their content.
UpperCAmelCase = os.path.join(__A , "utils" )
UpperCAmelCase = {
backend: os.path.join(__A , F"dummy_{short_names.get(__A , __A )}_objects.py" )
for backend in dummy_files.keys()
}
UpperCAmelCase = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__A ):
with open(__A , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.read()
else:
UpperCAmelCase = ""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F"Updating diffusers.utils.dummy_{short_names.get(__A , __A )}_objects.py as the main "
"__init__ has new objects." )
with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"The main __init__ has objects that are not present in "
F"diffusers.utils.dummy_{short_names.get(__A , __A )}_objects.py. Run `make fix-copies` "
"to fix this." )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCAmelCase__ = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 1 | 0 |
def _lowerCAmelCase( __A , __A ):
return numa ^ numa < 0
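# Worked example of the XOR sign test with two operands: 1 ^ -1 == -2, which
# is < 0, so the signs differ (True); 1 ^ 2 == 3, which is >= 0 (False).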
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class __magic_name__ ( _snake_case , _snake_case ):
UpperCAmelCase = """convnextv2"""
def __init__( self : Optional[Any] , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : Dict=4 , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : str="gelu" , lowerCAmelCase__ : Optional[int]=0.02 , lowerCAmelCase__ : Dict=1e-1_2 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : str=2_2_4 , lowerCAmelCase__ : int=None , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : List[Any] , ) -> List[Any]:
super().__init__(**lowerCAmelCase__ )
UpperCAmelCase = num_channels
UpperCAmelCase = patch_size
UpperCAmelCase = num_stages
UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes
UpperCAmelCase = [3, 3, 9, 3] if depths is None else depths
UpperCAmelCase = hidden_act
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = drop_path_rate
UpperCAmelCase = image_size
UpperCAmelCase = ["stem"] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )]
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names )
| 1 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class __magic_name__ ( lowercase_ ):
UpperCAmelCase = '''xlm'''
UpperCAmelCase = {
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
def __init__( self : Optional[Any] , lowerCAmelCase__ : Optional[int]=3_0_1_4_5 , lowerCAmelCase__ : List[str]=2_0_4_8 , lowerCAmelCase__ : List[str]=1_2 , lowerCAmelCase__ : List[Any]=1_6 , lowerCAmelCase__ : Optional[int]=0.1 , lowerCAmelCase__ : Optional[int]=0.1 , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : List[Any]=False , lowerCAmelCase__ : str=False , lowerCAmelCase__ : Tuple=1 , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Any=5_1_2 , lowerCAmelCase__ : List[str]=2_0_4_8**-0.5 , lowerCAmelCase__ : str=1e-1_2 , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : Any=0 , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : Dict=3 , lowerCAmelCase__ : int=5 , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Dict="first" , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : List[str]=0.1 , lowerCAmelCase__ : str=5 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : Optional[int]=0 , lowerCAmelCase__ : Optional[int]=0 , lowerCAmelCase__ : Optional[Any]=2 , lowerCAmelCase__ : List[str]=0 , **lowerCAmelCase__ : int , ) -> Tuple:
UpperCAmelCase = vocab_size
UpperCAmelCase = emb_dim
UpperCAmelCase = n_layers
UpperCAmelCase = n_heads
UpperCAmelCase = dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = gelu_activation
UpperCAmelCase = sinusoidal_embeddings
UpperCAmelCase = causal
UpperCAmelCase = asm
UpperCAmelCase = n_langs
UpperCAmelCase = use_lang_emb
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = bos_index
UpperCAmelCase = eos_index
UpperCAmelCase = pad_index
UpperCAmelCase = unk_index
UpperCAmelCase = mask_index
UpperCAmelCase = is_encoder
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = embed_init_std
UpperCAmelCase = init_std
UpperCAmelCase = summary_type
UpperCAmelCase = summary_use_proj
UpperCAmelCase = summary_activation
UpperCAmelCase = summary_proj_to_labels
UpperCAmelCase = summary_first_dropout
UpperCAmelCase = start_n_top
UpperCAmelCase = end_n_top
UpperCAmelCase = mask_token_id
UpperCAmelCase = lang_id
if "n_words" in kwargs:
UpperCAmelCase = kwargs["n_words"]
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
class __magic_name__ ( lowercase_ ):
@property
def _UpperCamelCase ( self : int ) -> Optional[int]:
if self.task == "multiple-choice":
UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 719 |
lowerCAmelCase__ = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCAmelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCAmelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 1 | 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class __magic_name__ ( _snake_case ):
def __init__( self : Optional[int] , *lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : str ) -> None:
warnings.warn(
"The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use FlavaImageProcessor instead." , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
| 720 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __magic_name__ ( _snake_case , unittest.TestCase ):
UpperCAmelCase = KandinskyInpaintPipeline
UpperCAmelCase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
UpperCAmelCase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
UpperCAmelCase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCAmelCase = False
@property
def _UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
return 3_2
@property
def _UpperCamelCase ( self : int ) -> List[Any]:
return 3_2
@property
def _UpperCamelCase ( self : List[Any] ) -> List[Any]:
return self.time_input_dim
@property
def _UpperCamelCase ( self : Tuple ) -> Tuple:
return self.time_input_dim * 4
@property
def _UpperCamelCase ( self : Any ) -> Optional[int]:
return 1_0_0
@property
def _UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
UpperCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def _UpperCamelCase ( self : int ) -> Dict:
torch.manual_seed(0 )
UpperCAmelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
UpperCAmelCase = MultilingualCLIP(lowerCAmelCase__ )
UpperCAmelCase = text_encoder.eval()
return text_encoder
@property
def _UpperCamelCase ( self : Dict ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase = {
"in_channels": 9,
            # Out channels is double the in channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCAmelCase = UNetaDConditionModel(**lowerCAmelCase__ )
return model
@property
def _UpperCamelCase ( self : str ) -> Optional[Any]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _UpperCamelCase ( self : Dict ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def _UpperCamelCase ( self : Tuple ) -> Any:
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = self.dummy_tokenizer
UpperCAmelCase = self.dummy_unet
UpperCAmelCase = self.dummy_movq
UpperCAmelCase = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , steps_offset=1 , prediction_type="epsilon" , thresholding=lowerCAmelCase__ , )
UpperCAmelCase = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple=0 ) -> str:
UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowerCAmelCase__ )
# create init_image
UpperCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
# create mask
UpperCAmelCase = np.ones((6_4, 6_4) , dtype=np.floataa )
UpperCAmelCase = 0
if str(lowerCAmelCase__ ).startswith("mps" ):
UpperCAmelCase = torch.manual_seed(lowerCAmelCase__ )
else:
UpperCAmelCase = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
UpperCAmelCase = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def _UpperCamelCase ( self : Dict ) -> List[str]:
UpperCAmelCase = "cpu"
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**lowerCAmelCase__ )
UpperCAmelCase = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
UpperCAmelCase = output.images
UpperCAmelCase = pipe(
**self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 6_4, 6_4, 3)
UpperCAmelCase = np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def _UpperCamelCase ( self : str ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : str ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : Tuple ) -> int:
UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
UpperCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCAmelCase = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
UpperCAmelCase = 0
UpperCAmelCase = "a hat"
UpperCAmelCase = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCAmelCase__ )
UpperCAmelCase = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
UpperCAmelCase = pipeline.to(lowerCAmelCase__ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase , UpperCAmelCase = pipe_prior(
lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCAmelCase = pipeline(
lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type="np" , )
UpperCAmelCase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
| 1 | 0 |
import torch
from diffusers import StableDiffusionPipeline
lowerCAmelCase__ = "path-to-your-trained-model"
lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
lowerCAmelCase__ = "A photo of sks dog in a bucket"
lowerCAmelCase__ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 721 |
def _lowerCAmelCase( __A , __A ):
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
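# Squared distances are compared throughout the helpers below; the single
# square root is deferred to the very end (the ** 0.5 in the top-level
# function), which avoids repeated sqrt calls during the recursion.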
def _lowerCAmelCase( __A , __A=0 ):
return sorted(__A , key=lambda __A : x[column] )
def _lowerCAmelCase( __A , __A , __A=float("inf" ) ):
for i in range(points_counts - 1 ):
for j in range(i + 1 , __A ):
UpperCAmelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
UpperCAmelCase = current_dis
return min_dis
def _lowerCAmelCase( __A , __A , __A=float("inf" ) ):
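    # In a strip sorted by y, a packing argument bounds the number of points
    # that can be closer than the current best distance, so each point only
    # needs to be checked against a constant-size (6-wide) window below.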
for i in range(min(6 , points_counts - 1 ) , __A ):
for j in range(max(0 , i - 6 ) , __A ):
UpperCAmelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
UpperCAmelCase = current_dis
return min_dis
def _lowerCAmelCase( __A , __A , __A ):
# base case
if points_counts <= 3:
return dis_between_closest_pair(__A , __A )
# recursion
UpperCAmelCase = points_counts // 2
UpperCAmelCase = closest_pair_of_points_sqr(
__A , points_sorted_on_y[:mid] , __A )
UpperCAmelCase = closest_pair_of_points_sqr(
__A , points_sorted_on_y[mid:] , points_counts - mid )
UpperCAmelCase = min(__A , __A )
UpperCAmelCase = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(__A )
UpperCAmelCase = dis_between_closest_in_strip(
__A , len(__A ) , __A )
return min(__A , __A )
def _lowerCAmelCase( __A , __A ):
UpperCAmelCase = column_based_sort(__A , column=0 )
UpperCAmelCase = column_based_sort(__A , column=1 )
return (
closest_pair_of_points_sqr(
__A , __A , __A )
) ** 0.5
if __name__ == "__main__":
lowerCAmelCase__ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 1 | 0 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class __magic_name__ :
def __init__( self : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple=0.2 , lowerCAmelCase__ : Union[str, Any]=0.2 ) -> Tuple:
UpperCAmelCase = bp_numa
UpperCAmelCase = bp_numa
UpperCAmelCase = bp_numa
UpperCAmelCase = conva_get[:2]
UpperCAmelCase = conva_get[2]
UpperCAmelCase = size_pa
UpperCAmelCase = rate_w
UpperCAmelCase = rate_t
UpperCAmelCase = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
UpperCAmelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
UpperCAmelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
UpperCAmelCase = -2 * np.random.rand(self.conva[1] ) + 1
UpperCAmelCase = -2 * np.random.rand(self.num_bpa ) + 1
UpperCAmelCase = -2 * np.random.rand(self.num_bpa ) + 1
def _UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : str ) -> Tuple:
# save model dict with pickle
UpperCAmelCase = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(lowerCAmelCase__ , "wb" ) as f:
pickle.dump(lowerCAmelCase__ , lowerCAmelCase__ )
print(f"Model saved: {save_path}" )
@classmethod
def _UpperCamelCase ( cls : int , lowerCAmelCase__ : Dict ) -> Optional[Any]:
# read saved model
with open(lowerCAmelCase__ , "rb" ) as f:
UpperCAmelCase = pickle.load(lowerCAmelCase__ ) # noqa: S301
UpperCAmelCase = model_dic.get("conv1" )
conv_get.append(model_dic.get("step_conv1" ) )
UpperCAmelCase = model_dic.get("size_pooling1" )
UpperCAmelCase = model_dic.get("num_bp1" )
UpperCAmelCase = model_dic.get("num_bp2" )
UpperCAmelCase = model_dic.get("num_bp3" )
UpperCAmelCase = model_dic.get("rate_weight" )
UpperCAmelCase = model_dic.get("rate_thre" )
# create model instance
UpperCAmelCase = CNN(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# modify model parameter
UpperCAmelCase = model_dic.get("w_conv1" )
UpperCAmelCase = model_dic.get("wkj" )
UpperCAmelCase = model_dic.get("vji" )
UpperCAmelCase = model_dic.get("thre_conv1" )
UpperCAmelCase = model_dic.get("thre_bp2" )
UpperCAmelCase = model_dic.get("thre_bp3" )
return conv_ins
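    # Save/load round trip: the save method above pickles the hyper-parameters,
    # weights and thresholds into a single dict, and the classmethod just above
    # rebuilds an equivalent CNN instance from that file.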
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : str ) -> Tuple:
return 1 / (1 + np.exp(-1 * x ))
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : int ) -> Optional[int]:
return round(lowerCAmelCase__ , 3 )
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple ) -> str:
# convolution process
UpperCAmelCase = convs[0]
UpperCAmelCase = convs[1]
UpperCAmelCase = np.shape(lowerCAmelCase__ )[0]
# get the data slice of original image data, data_focus
UpperCAmelCase = []
for i_focus in range(0 , size_data - size_conv + 1 , lowerCAmelCase__ ):
for j_focus in range(0 , size_data - size_conv + 1 , lowerCAmelCase__ ):
UpperCAmelCase = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowerCAmelCase__ )
        # calculate the feature map of every single kernel, and save them as a list of matrices
UpperCAmelCase = []
UpperCAmelCase = int((size_data - size_conv) / conv_step + 1 )
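        # Worked example: a 28x28 input with a 5x5 kernel and stride 1 gives
        # feature maps of size (28 - 5) / 1 + 1 = 24, i.e. 24x24.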
for i_map in range(lowerCAmelCase__ ):
UpperCAmelCase = []
for i_focus in range(len(lowerCAmelCase__ ) ):
UpperCAmelCase = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowerCAmelCase__ ) )
UpperCAmelCase = np.asmatrix(lowerCAmelCase__ ).reshape(
lowerCAmelCase__ , lowerCAmelCase__ )
data_featuremap.append(lowerCAmelCase__ )
        # expanding the data slice to one dimension
UpperCAmelCase = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowerCAmelCase__ ) )
UpperCAmelCase = np.asarray(lowerCAmelCase__ )
return focus_list, data_featuremap
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int]="average_pool" ) -> List[Any]:
# pooling process
UpperCAmelCase = len(featuremaps[0] )
UpperCAmelCase = int(size_map / size_pooling )
UpperCAmelCase = []
for i_map in range(len(lowerCAmelCase__ ) ):
UpperCAmelCase = featuremaps[i_map]
UpperCAmelCase = []
for i_focus in range(0 , lowerCAmelCase__ , lowerCAmelCase__ ):
for j_focus in range(0 , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowerCAmelCase__ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowerCAmelCase__ ) )
UpperCAmelCase = np.asmatrix(lowerCAmelCase__ ).reshape(lowerCAmelCase__ , lowerCAmelCase__ )
featuremap_pooled.append(lowerCAmelCase__ )
return featuremap_pooled
def _UpperCamelCase ( self : str , lowerCAmelCase__ : List[str] ) -> Union[str, Any]:
        # expanding three-dimensional data to a one-dimensional list
UpperCAmelCase = []
for i in range(len(lowerCAmelCase__ ) ):
UpperCAmelCase = np.shape(data[i] )
UpperCAmelCase = data[i].reshape(1 , shapes[0] * shapes[1] )
UpperCAmelCase = data_listed.getA().tolist()[0]
data_expanded.extend(lowerCAmelCase__ )
UpperCAmelCase = np.asarray(lowerCAmelCase__ )
return data_expanded
def _UpperCamelCase ( self : List[str] , lowerCAmelCase__ : Dict ) -> str:
        # expanding a matrix to a one-dimensional list
UpperCAmelCase = np.asarray(lowerCAmelCase__ )
UpperCAmelCase = np.shape(lowerCAmelCase__ )
UpperCAmelCase = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] ) -> List[Any]:
UpperCAmelCase = []
UpperCAmelCase = 0
for i_map in range(lowerCAmelCase__ ):
UpperCAmelCase = np.ones((size_map, size_map) )
for i in range(0 , lowerCAmelCase__ , lowerCAmelCase__ ):
for j in range(0 , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase = pd_pool[
i_pool
]
UpperCAmelCase = i_pool + 1
UpperCAmelCase = np.multiply(
lowerCAmelCase__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(lowerCAmelCase__ )
return pd_all
def _UpperCamelCase ( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int]=bool ) -> List[str]:
        # model training
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(lowerCAmelCase__ )) )
print((" - - Shape: Teach_Data ", np.shape(lowerCAmelCase__ )) )
UpperCAmelCase = 0
UpperCAmelCase = []
UpperCAmelCase = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
UpperCAmelCase = 0
print(f"-------------Learning Time {rp}--------------" )
for p in range(len(lowerCAmelCase__ ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCAmelCase = np.asmatrix(datas_train[p] )
UpperCAmelCase = np.asarray(datas_teach[p] )
UpperCAmelCase , UpperCAmelCase = self.convolute(
lowerCAmelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCAmelCase = self.pooling(lowerCAmelCase__ , self.size_poolinga )
UpperCAmelCase = np.shape(lowerCAmelCase__ )
UpperCAmelCase = self._expand(lowerCAmelCase__ )
UpperCAmelCase = data_bp_input
UpperCAmelCase = np.dot(lowerCAmelCase__ , self.vji.T ) - self.thre_bpa
UpperCAmelCase = self.sig(lowerCAmelCase__ )
UpperCAmelCase = np.dot(lowerCAmelCase__ , self.wkj.T ) - self.thre_bpa
UpperCAmelCase = self.sig(lowerCAmelCase__ )
            # --------------Model Learning ------------------------
# calculate error and gradient---------------
UpperCAmelCase = np.multiply(
(data_teach - bp_outa) , np.multiply(lowerCAmelCase__ , (1 - bp_outa) ) )
UpperCAmelCase = np.multiply(
np.dot(lowerCAmelCase__ , self.wkj ) , np.multiply(lowerCAmelCase__ , (1 - bp_outa) ) )
UpperCAmelCase = np.dot(lowerCAmelCase__ , self.vji )
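                # backprop through the pooling layer: spread the gradient evenly over each window (assumes average pooling)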
UpperCAmelCase = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCAmelCase = pd_conva_pooled.T.getA().tolist()
UpperCAmelCase = self._calculate_gradient_from_pool(
lowerCAmelCase__ , lowerCAmelCase__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCAmelCase = self._expand_mat(pd_conva_all[k_conv] )
UpperCAmelCase = self.rate_weight * np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCAmelCase = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
UpperCAmelCase = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCAmelCase = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCAmelCase = self.thre_bpa - pd_k_all * self.rate_thre
UpperCAmelCase = self.thre_bpa - pd_j_all * self.rate_thre
                # accumulate the absolute error of this single image
UpperCAmelCase = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCAmelCase = rp + 1
UpperCAmelCase = error_count / patterns
all_mse.append(lowerCAmelCase__ )
def draw_error():
UpperCAmelCase = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCAmelCase__ , "+-" )
plt.plot(lowerCAmelCase__ , "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(lowerCAmelCase__ , alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}") )
if draw_e:
draw_error()
return mse
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : Dict ) -> Union[str, Any]:
        # model prediction
UpperCAmelCase = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(lowerCAmelCase__ )) )
for p in range(len(lowerCAmelCase__ ) ):
UpperCAmelCase = np.asmatrix(datas_test[p] )
UpperCAmelCase , UpperCAmelCase = self.convolute(
lowerCAmelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCAmelCase = self.pooling(lowerCAmelCase__ , self.size_poolinga )
UpperCAmelCase = self._expand(lowerCAmelCase__ )
UpperCAmelCase = data_bp_input
UpperCAmelCase = bp_outa * self.vji.T - self.thre_bpa
UpperCAmelCase = self.sig(lowerCAmelCase__ )
UpperCAmelCase = bp_outa * self.wkj.T - self.thre_bpa
UpperCAmelCase = self.sig(lowerCAmelCase__ )
produce_out.extend(bp_outa.getA().tolist() )
UpperCAmelCase = [list(map(self.do_round , lowerCAmelCase__ ) ) for each in produce_out]
return np.asarray(lowerCAmelCase__ )
def _UpperCamelCase ( self : Tuple , lowerCAmelCase__ : Optional[Any] ) -> Optional[int]:
        # return the image data after the convolution and pooling steps so they can be inspected
UpperCAmelCase = np.asmatrix(lowerCAmelCase__ )
UpperCAmelCase , UpperCAmelCase = self.convolute(
lowerCAmelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCAmelCase = self.pooling(lowerCAmelCase__ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 700 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __magic_name__ :
def __init__( self : Optional[int] ) -> Optional[Any]:
UpperCAmelCase = ""
UpperCAmelCase = ""
UpperCAmelCase = []
UpperCAmelCase = 0
UpperCAmelCase = 2_5_6
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : Optional[Any] ) -> List[str]:
UpperCAmelCase = cva.imread(lowerCAmelCase__ , 0 )
UpperCAmelCase = copy.deepcopy(self.img )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="x" )
UpperCAmelCase = np.sum(lowerCAmelCase__ )
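        # accumulate the histogram into a cumulative distribution (sk) and map each gray level to (self.L - 1) * sk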
for i in range(len(lowerCAmelCase__ ) ):
UpperCAmelCase = x[i] / self.k
self.sk += prk
UpperCAmelCase = (self.L - 1) * self.sk
            UpperCAmelCase = last % 1  # fractional part of the mapped gray level
            UpperCAmelCase = int(last + 1 if self.rem >= 0.5 else last )  # round to the nearest level
self.last_list.append(lowerCAmelCase__ )
UpperCAmelCase = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def _UpperCamelCase ( self : str ) -> int:
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
    lowerCAmelCase__ = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
lowerCAmelCase__ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 1 | 0 |
from datetime import datetime
import requests
def _lowerCAmelCase( __A ):
UpperCAmelCase = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
UpperCAmelCase = requests.get(base_url + url ).json()[0]["urls"][0]["src"]
return requests.get(__A ).content
if __name__ == "__main__":
lowerCAmelCase__ = input("Enter Video/IGTV url: ").strip()
lowerCAmelCase__ = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f"Done. Video saved to disk as {file_name}.")
| 701 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _snake_case , unittest.TestCase ):
UpperCAmelCase = LEDTokenizer
UpperCAmelCase = LEDTokenizerFast
UpperCAmelCase = True
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
super().setUp()
UpperCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCAmelCase = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
UpperCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCAmelCase = {"unk_token": "<unk>"}
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def _UpperCamelCase ( self : Union[str, Any] , **lowerCAmelCase__ : Optional[int] ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _UpperCamelCase ( self : str , **lowerCAmelCase__ : str ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _UpperCamelCase ( self : List[str] , lowerCAmelCase__ : List[Any] ) -> List[Any]:
return "lower newer", "lower newer"
@cached_property
def _UpperCamelCase ( self : Dict ) -> str:
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def _UpperCamelCase ( self : int ) -> Tuple:
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def _UpperCamelCase ( self : Tuple ) -> List[str]:
UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCAmelCase = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , max_length=len(lowerCAmelCase__ ) , padding=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@require_torch
def _UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="pt" )
self.assertIn("input_ids" , lowerCAmelCase__ )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertNotIn("labels" , lowerCAmelCase__ )
self.assertNotIn("decoder_attention_mask" , lowerCAmelCase__ )
@require_torch
def _UpperCamelCase ( self : int ) -> int:
UpperCAmelCase = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(text_target=lowerCAmelCase__ , max_length=3_2 , padding="max_length" , return_tensors="pt" )
self.assertEqual(3_2 , targets["input_ids"].shape[1] )
@require_torch
def _UpperCamelCase ( self : Any ) -> int:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(
["I am a small frog" * 1_0_2_4, "I am a small frog"] , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def _UpperCamelCase ( self : Dict ) -> Tuple:
UpperCAmelCase = ["A long paragraph for summarization."]
UpperCAmelCase = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase = tokenizer(text_target=lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase = inputs["input_ids"]
UpperCAmelCase = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def _UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = ["Summary of the text.", "Another summary."]
UpperCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
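            # tokenizer.pad should pad the manually added global attention mask with -1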
UpperCAmelCase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ )
UpperCAmelCase = [[0] * len(lowerCAmelCase__ ) for x in encoded_output["input_ids"]]
UpperCAmelCase = tokenizer.pad(lowerCAmelCase__ )
self.assertSequenceEqual(outputs["global_attention_mask"] , lowerCAmelCase__ )
def _UpperCamelCase ( self : List[str] ) -> int:
pass
def _UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = "A, <mask> AllenNLP sentence."
UpperCAmelCase = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
UpperCAmelCase = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 1 | 0 |
def _lowerCAmelCase( __A ):
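    # compute the rank of the matrix via Gaussian elimination (row reduction)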
UpperCAmelCase = len(__A )
UpperCAmelCase = len(matrix[0] )
UpperCAmelCase = min(__A , __A )
for row in range(__A ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , __A ):
UpperCAmelCase = matrix[col][row] / matrix[row][row]
for i in range(__A , __A ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
UpperCAmelCase = True
for i in range(row + 1 , __A ):
if matrix[i][row] != 0:
UpperCAmelCase , UpperCAmelCase = matrix[i], matrix[row]
UpperCAmelCase = False
break
if reduce:
rank -= 1
for i in range(__A ):
UpperCAmelCase = matrix[i][rank]
            # step back to stay on the same row (note: decrementing the loop variable has no effect inside a for-range loop)
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase__ = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
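    # newer NLTK versions (>= 3.6.5) expect pre-tokenized inputs for meteor_score, hence word_tokenize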
lowerCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def _UpperCamelCase ( self : int ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : List[Any] ) -> Dict:
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=0.9 , lowerCAmelCase__ : Tuple=3 , lowerCAmelCase__ : Optional[int]=0.5 ) -> Any:
if NLTK_VERSION >= version.Version("3.6.5" ):
UpperCAmelCase = [
meteor_score.single_meteor_score(
word_tokenize(lowerCAmelCase__ ) , word_tokenize(lowerCAmelCase__ ) , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
else:
UpperCAmelCase = [
meteor_score.single_meteor_score(lowerCAmelCase__ , lowerCAmelCase__ , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
return {"meteor": np.mean(lowerCAmelCase__ )}
| 1 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase__ : Optional[int] = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """roformer"""
def __init__( self : Optional[int] , lowerCAmelCase__ : List[str]=5_0_0_0_0 , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Union[str, Any]=7_6_8 , lowerCAmelCase__ : Dict=1_2 , lowerCAmelCase__ : int=1_2 , lowerCAmelCase__ : Tuple=3_0_7_2 , lowerCAmelCase__ : Tuple="gelu" , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : Union[str, Any]=0.1 , lowerCAmelCase__ : Dict=1_5_3_6 , lowerCAmelCase__ : Dict=2 , lowerCAmelCase__ : Any=0.02 , lowerCAmelCase__ : Tuple=1e-1_2 , lowerCAmelCase__ : Any=0 , lowerCAmelCase__ : List[Any]=False , lowerCAmelCase__ : Tuple=True , **lowerCAmelCase__ : int , ) -> Dict:
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size if embedding_size is None else embedding_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = rotary_value
UpperCAmelCase = use_cache
class __magic_name__ ( _snake_case ):
@property
def _UpperCamelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase = {0: "batch", 1: "sequence"}
UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 703 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """lxmert"""
UpperCAmelCase = {}
def __init__( self : int , lowerCAmelCase__ : Any=3_0_5_2_2 , lowerCAmelCase__ : List[str]=7_6_8 , lowerCAmelCase__ : Union[str, Any]=1_2 , lowerCAmelCase__ : List[Any]=9_5_0_0 , lowerCAmelCase__ : Any=1_6_0_0 , lowerCAmelCase__ : Union[str, Any]=4_0_0 , lowerCAmelCase__ : Tuple=3_0_7_2 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : int=5_1_2 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : List[str]=0.02 , lowerCAmelCase__ : str=1e-1_2 , lowerCAmelCase__ : str=9 , lowerCAmelCase__ : int=5 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : List[Any]=2_0_4_8 , lowerCAmelCase__ : Any=4 , lowerCAmelCase__ : Dict=6.67 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Tuple=True , **lowerCAmelCase__ : List[Any] , ) -> Dict:
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = num_qa_labels
UpperCAmelCase = num_object_labels
UpperCAmelCase = num_attr_labels
UpperCAmelCase = l_layers
UpperCAmelCase = x_layers
UpperCAmelCase = r_layers
UpperCAmelCase = visual_feat_dim
UpperCAmelCase = visual_pos_dim
UpperCAmelCase = visual_loss_normalizer
UpperCAmelCase = task_matched
UpperCAmelCase = task_mask_lm
UpperCAmelCase = task_obj_predict
UpperCAmelCase = task_qa
UpperCAmelCase = visual_obj_loss
UpperCAmelCase = visual_attr_loss
UpperCAmelCase = visual_feat_loss
UpperCAmelCase = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**lowerCAmelCase__ )
| 1 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase__ = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
lowerCAmelCase__ = {
"unc-nlp/lxmert-base-uncased": 512,
}
lowerCAmelCase__ = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class __magic_name__ ( _snake_case ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = LxmertTokenizer
def __init__( self : Optional[int] , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : List[str]="[UNK]" , lowerCAmelCase__ : Dict="[SEP]" , lowerCAmelCase__ : Any="[PAD]" , lowerCAmelCase__ : List[str]="[CLS]" , lowerCAmelCase__ : Union[str, Any]="[MASK]" , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Any=None , **lowerCAmelCase__ : Optional[int] , ) -> List[str]:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase__ ) != tokenize_chinese_chars
):
UpperCAmelCase = getattr(lowerCAmelCase__ , normalizer_state.pop("type" ) )
UpperCAmelCase = do_lower_case
UpperCAmelCase = strip_accents
UpperCAmelCase = tokenize_chinese_chars
UpperCAmelCase = normalizer_class(**lowerCAmelCase__ )
UpperCAmelCase = do_lower_case
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict=None ) -> Optional[Any]:
UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
| 704 |
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _lowerCAmelCase( __A = 100 ):
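    # continued fraction of e: [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]; the i-th partial quotient is 2 * i / 3 when i is divisible by 3, else 1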
UpperCAmelCase = 1
UpperCAmelCase = 2
for i in range(2 , max_n + 1 ):
UpperCAmelCase = pre_numerator
UpperCAmelCase = 2 * i // 3 if i % 3 == 0 else 1
UpperCAmelCase = cur_numerator
UpperCAmelCase = e_cont * pre_numerator + temp
return sum_digits(__A )
if __name__ == "__main__":
print(f"{solution() = }")
| 1 | 0 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
lowerCAmelCase__ = parser.parse_args()
    lowerCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
lowerCAmelCase__ = CLIPImageProcessor()
lowerCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
lowerCAmelCase__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 705 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 1 | 0 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
lowerCAmelCase__ = datasets.utils.logging.get_logger(__name__)
lowerCAmelCase__ = ["names", "prefix"]
lowerCAmelCase__ = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
lowerCAmelCase__ = ["encoding_errors", "on_bad_lines"]
lowerCAmelCase__ = ["date_format"]
@dataclass
class __magic_name__ ( datasets.BuilderConfig ):
UpperCAmelCase = ""","""
UpperCAmelCase = None
UpperCAmelCase = """infer"""
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = True
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = False
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = True
UpperCAmelCase = None
UpperCAmelCase = """."""
UpperCAmelCase = None
UpperCAmelCase = """\""""
UpperCAmelCase = 0
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = 0
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = None
UpperCAmelCase = 10_000
UpperCAmelCase = None
UpperCAmelCase = """strict"""
UpperCAmelCase = """error"""
UpperCAmelCase = None
def _UpperCamelCase ( self : Optional[int] ) -> Tuple:
if self.delimiter is not None:
UpperCAmelCase = self.delimiter
if self.column_names is not None:
UpperCAmelCase = self.column_names
@property
def _UpperCamelCase ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated, so we also skip them when they are set to their default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class __magic_name__ ( datasets.ArrowBasedBuilder ):
UpperCAmelCase = CsvConfig
def _UpperCamelCase ( self : Dict ) -> Tuple:
return datasets.DatasetInfo(features=self.config.features )
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : Tuple ) -> List[Any]:
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
UpperCAmelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
UpperCAmelCase = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase = [files]
UpperCAmelCase = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
UpperCAmelCase = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase = [files]
UpperCAmelCase = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) )
return splits
def _UpperCamelCase ( self : Tuple , lowerCAmelCase__ : pa.Table ) -> pa.Table:
if self.config.features is not None:
UpperCAmelCase = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
UpperCAmelCase = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
UpperCAmelCase = table_cast(lowerCAmelCase__ , lowerCAmelCase__ )
return pa_table
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
UpperCAmelCase = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
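            # read each CSV in chunks (an iterator of DataFrames) so large files are not loaded into memory at once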
UpperCAmelCase = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase__ ):
UpperCAmelCase = pa.Table.from_pandas(lowerCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(lowerCAmelCase__ )}: {e}" )
raise
| 706 |
import numpy
# List of input, output pairs
lowerCAmelCase__ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
lowerCAmelCase__ = (((515, 22, 13), 555), ((61, 35, 49), 150))
lowerCAmelCase__ = [2, 4, 1, 5]
lowerCAmelCase__ = len(train_data)
lowerCAmelCase__ = 0.0_0_9
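# hypothesis: h(x) = parameter_vector[0] + parameter_vector[1] * x[0] + parameter_vector[2] * x[1] + ...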
def _lowerCAmelCase( __A , __A="train" ):
return calculate_hypothesis_value(__A , __A ) - output(
__A , __A )
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
for i in range(len(__A ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _lowerCAmelCase( __A , __A ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _lowerCAmelCase( __A , __A ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _lowerCAmelCase( __A , __A=m ):
UpperCAmelCase = 0
for i in range(__A ):
if index == -1:
summation_value += _error(__A )
else:
summation_value += _error(__A ) * train_data[i][0][index]
return summation_value
def _lowerCAmelCase( __A ):
UpperCAmelCase = summation_of_cost_derivative(__A , __A ) / m
return cost_derivative_value
def _lowerCAmelCase( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
UpperCAmelCase = 0.000002
UpperCAmelCase = 0
UpperCAmelCase = 0
while True:
j += 1
UpperCAmelCase = [0, 0, 0, 0]
for i in range(0 , len(__A ) ):
UpperCAmelCase = get_cost_derivative(i - 1 )
UpperCAmelCase = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
__A , __A , atol=__A , rtol=__A , ):
break
UpperCAmelCase = temp_parameter_vector
print(("Number of iterations:", j) )
def _lowerCAmelCase( ):
for i in range(len(__A ) ):
print(("Actual output value:", output(__A , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(__A , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 1 | 0 |
from __future__ import annotations
class __magic_name__ :
def __init__( self : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> Tuple:
UpperCAmelCase , UpperCAmelCase = text, pattern
UpperCAmelCase , UpperCAmelCase = len(lowerCAmelCase__ ), len(lowerCAmelCase__ )
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : str ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def _UpperCamelCase ( self : int , lowerCAmelCase__ : int ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def _UpperCamelCase ( self : List[Any] ) -> list[int]:
# searches pattern in text and returns index positions
UpperCAmelCase = []
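        # try every alignment of the pattern; record exact matches, otherwise compute the bad-character shift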
for i in range(self.textLen - self.patLen + 1 ):
UpperCAmelCase = self.mismatch_in_text(lowerCAmelCase__ )
if mismatch_index == -1:
positions.append(lowerCAmelCase__ )
else:
UpperCAmelCase = self.match_in_pattern(self.text[mismatch_index] )
UpperCAmelCase = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
lowerCAmelCase__ = "ABAABA"
lowerCAmelCase__ = "AB"
lowerCAmelCase__ = BoyerMooreSearch(text, pattern)
lowerCAmelCase__ = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
| 707 |
def _lowerCAmelCase( __A , __A , __A ):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(__A , n - 1 , __A ) * a) % mod
else:
        UpperCAmelCase = binary_exponentiation(__A , n // 2 , __A )
return (b * b) % mod
# a prime number
lowerCAmelCase__ = 701
lowerCAmelCase__ = 1000000000
lowerCAmelCase__ = 10
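# Fermat's little theorem: for prime p, b^(p-2) is the modular inverse of b, so (a / b) % p == (a * b^(p-2)) % p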
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
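# using Python operators directly, O(p):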
print((a / b) % p == (a * b ** (p - 2)) % p)
| 1 | 0 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
lowerCAmelCase__ = (
"This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def _lowerCAmelCase( __A : Optional[Any] , __A : List[Any] ):
warnings.warn(__A , __A )
requires_backends(__A , "sklearn" )
return (preds == labels).mean()
def _lowerCAmelCase( __A : str , __A : List[Any] ):
warnings.warn(__A , __A )
requires_backends(__A , "sklearn" )
UpperCAmelCase = simple_accuracy(__A , __A )
UpperCAmelCase = fa_score(y_true=__A , y_pred=__A )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def _lowerCAmelCase( __A : List[str] , __A : Optional[int] ):
warnings.warn(__A , __A )
requires_backends(__A , "sklearn" )
UpperCAmelCase = pearsonr(__A , __A )[0]
UpperCAmelCase = spearmanr(__A , __A )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def _lowerCAmelCase( __A : Tuple , __A : Optional[int] , __A : Optional[int] ):
warnings.warn(__A , __A )
requires_backends(__A , "sklearn" )
assert len(__A ) == len(__A ), F"Predictions and labels have mismatched lengths {len(__A )} and {len(__A )}"
if task_name == "cola":
return {"mcc": matthews_corrcoef(__A , __A )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(__A , __A )}
elif task_name == "mrpc":
return acc_and_fa(__A , __A )
elif task_name == "sts-b":
return pearson_and_spearman(__A , __A )
elif task_name == "qqp":
return acc_and_fa(__A , __A )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(__A , __A )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(__A , __A )}
elif task_name == "qnli":
return {"acc": simple_accuracy(__A , __A )}
elif task_name == "rte":
return {"acc": simple_accuracy(__A , __A )}
elif task_name == "wnli":
return {"acc": simple_accuracy(__A , __A )}
elif task_name == "hans":
return {"acc": simple_accuracy(__A , __A )}
else:
raise KeyError(__A )
def _lowerCAmelCase( __A : int , __A : Tuple , __A : Optional[int] ):
warnings.warn(__A , __A )
requires_backends(__A , "sklearn" )
if len(__A ) != len(__A ):
raise ValueError(F"Predictions and labels have mismatched lengths {len(__A )} and {len(__A )}" )
if task_name == "xnli":
return {"acc": simple_accuracy(__A , __A )}
else:
raise KeyError(__A )
| 708 |
lowerCAmelCase__ = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
lowerCAmelCase__ = {value: key for key, value in encode_dict.items()}
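# Baconian cipher: every letter is encoded as a distinct 5-character group of "A"s and "B"s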
def _lowerCAmelCase( __A ):
UpperCAmelCase = ""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("encode() accepts only letters of the alphabet and spaces" )
return encoded
def _lowerCAmelCase( __A ):
if set(__A ) - {"A", "B", " "} != set():
raise Exception("decode() accepts only 'A', 'B' and spaces" )
UpperCAmelCase = ""
for word in coded.split():
        while len(word) != 0:
decoded += decode_dict[word[:5]]
UpperCAmelCase = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | 0 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCAmelCase__ = get_tests_dir("fixtures/dummy-config.json")
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : Dict ) -> List[Any]:
UpperCAmelCase = 0
def _UpperCamelCase ( self : Optional[int] ) -> Dict:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
UpperCAmelCase = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : str ) -> int:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : str ) -> Any:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase = AutoConfig.for_model("roberta" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : int ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
UpperCAmelCase = os.path.join(lowerCAmelCase__ , "fake-roberta" )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , "config.json" ) , "w" ) as f:
f.write(json.dumps({} ) )
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(type(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
try:
AutoConfig.register("custom" , lowerCAmelCase__ )
# Wrong model type will raise an error
with self.assertRaises(lowerCAmelCase__ ):
AutoConfig.register("model" , lowerCAmelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase__ ):
AutoConfig.register("bert" , lowerCAmelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ )
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
with self.assertRaisesRegex(
lowerCAmelCase__ , "bert-base is not a local folder and is not a valid model identifier" ):
UpperCAmelCase = AutoConfig.from_pretrained("bert-base" )
def _UpperCamelCase ( self : int ) -> int:
with self.assertRaisesRegex(
lowerCAmelCase__ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase__ , revision="aaaaaa" )
def _UpperCamelCase ( self : Any ) -> Optional[int]:
with self.assertRaisesRegex(
lowerCAmelCase__ , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
UpperCAmelCase = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def _UpperCamelCase ( self : str ) -> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCAmelCase__ ):
UpperCAmelCase = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase__ ):
UpperCAmelCase = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ )
UpperCAmelCase = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ )
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase__ , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
def _UpperCamelCase ( self : Tuple ) -> Tuple:
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """new-model"""
try:
AutoConfig.register("new-model" , lowerCAmelCase__ )
# If remote code is not set, the default is to use local
UpperCAmelCase = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
UpperCAmelCase = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
UpperCAmelCase = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 709 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase__ = {"UserAgent": UserAgent().random}
def _lowerCAmelCase( __A ):
UpperCAmelCase = script.contents[0]
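    # slice the embedded JSON object out of the script tag (the trailing character after the closing brace is dropped)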
UpperCAmelCase = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __magic_name__ :
def __init__( self : Optional[Any] , lowerCAmelCase__ : Optional[int] ) -> Any:
UpperCAmelCase = f"https://www.instagram.com/{username}/"
UpperCAmelCase = self.get_json()
def _UpperCamelCase ( self : List[str] ) -> dict:
UpperCAmelCase = requests.get(self.url , headers=lowerCAmelCase__ ).text
UpperCAmelCase = BeautifulSoup(lowerCAmelCase__ , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Tuple ) -> str:
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self : Optional[int] ) -> str:
return f"{self.fullname} ({self.username}) is {self.biography}"
@property
def _UpperCamelCase ( self : Any ) -> str:
return self.user_data["username"]
@property
def _UpperCamelCase ( self : List[Any] ) -> str:
return self.user_data["full_name"]
@property
def _UpperCamelCase ( self : List[str] ) -> str:
return self.user_data["biography"]
@property
def _UpperCamelCase ( self : Optional[int] ) -> str:
return self.user_data["business_email"]
@property
def _UpperCamelCase ( self : str ) -> str:
return self.user_data["external_url"]
@property
def _UpperCamelCase ( self : int ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def _UpperCamelCase ( self : List[Any] ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def _UpperCamelCase ( self : List[str] ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _UpperCamelCase ( self : Tuple ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def _UpperCamelCase ( self : Optional[int] ) -> bool:
return self.user_data["is_verified"]
@property
def _UpperCamelCase ( self : Optional[Any] ) -> bool:
return self.user_data["is_private"]
def _lowerCAmelCase( __A = "github" ):
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
UpperCAmelCase = InstagramUser(__A )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __A )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = InstagramUser("github")
print(instagram_user)
print(f"{instagram_user.number_of_posts = }")
print(f"{instagram_user.number_of_followers = }")
print(f"{instagram_user.number_of_followings = }")
print(f"{instagram_user.email = }")
print(f"{instagram_user.website = }")
print(f"{instagram_user.profile_picture_url = }")
print(f"{instagram_user.is_verified = }")
print(f"{instagram_user.is_private = }")
| 1 | 0 |
def _lowerCAmelCase( ):
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
lowerCAmelCase__ = generate_large_matrix()
lowerCAmelCase__ = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def _lowerCAmelCase( __A ):
assert all(row == sorted(__A , reverse=__A ) for row in grid )
assert all(list(__A ) == sorted(__A , reverse=__A ) for col in zip(*__A ) )
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
UpperCAmelCase = len(__A ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
UpperCAmelCase = (left + right) // 2
UpperCAmelCase = array[mid]
        # num is the first negative number if it is negative and its left neighbor is non-negative.
        if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
UpperCAmelCase = mid + 1
else:
UpperCAmelCase = mid - 1
    # no negative numbers found, so return the length of the array (one past the last index)
return len(__A )
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
UpperCAmelCase = len(grid[0] )
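    # rows and columns are sorted in decreasing order, so the first-negative index can only move left (or stay) from row to row; reuse it as an upper bound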
for i in range(len(__A ) ):
UpperCAmelCase = find_negative_index(grid[i][:bound] )
total += bound
return (len(__A ) * len(grid[0] )) - total
def _lowerCAmelCase( __A ):
return len([number for row in grid for number in row if number < 0] )
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
for row in grid:
for i, number in enumerate(__A ):
if number < 0:
total += len(__A ) - i
break
return total
def _lowerCAmelCase( ):
from timeit import timeit
print("Running benchmarks" )
UpperCAmelCase = (
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
UpperCAmelCase = timeit(F"{func}(grid=grid)" , setup=__A , number=500 )
print(F"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 710 |
import unittest
import numpy as np
def _lowerCAmelCase( __A , __A , __A , __A = None , ):
UpperCAmelCase = np.shape(__A )
UpperCAmelCase = np.shape(__A )
UpperCAmelCase = np.shape(__A )
if shape_a[0] != shape_b[0]:
UpperCAmelCase = (
"Expected the same number of rows for A and B. "
F"Instead found A of size {shape_a} and B of size {shape_b}"
)
raise ValueError(__A )
if shape_b[1] != shape_c[1]:
UpperCAmelCase = (
"Expected the same number of columns for B and C. "
F"Instead found B of size {shape_b} and C of size {shape_c}"
)
raise ValueError(__A )
UpperCAmelCase = pseudo_inv
if a_inv is None:
try:
UpperCAmelCase = np.linalg.inv(__A )
except np.linalg.LinAlgError:
raise ValueError(
"Input matrix A is not invertible. Cannot compute Schur complement." )
return mat_c - mat_b.T @ a_inv @ mat_b
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : List[str] ) -> None:
UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase = np.array([[2, 1], [6, 3]] )
UpperCAmelCase = schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = np.block([[a, b], [b.T, c]] )
UpperCAmelCase = np.linalg.det(lowerCAmelCase__ )
UpperCAmelCase = np.linalg.det(lowerCAmelCase__ )
UpperCAmelCase = np.linalg.det(lowerCAmelCase__ )
self.assertAlmostEqual(lowerCAmelCase__ , det_a * det_s )
def _UpperCamelCase ( self : str ) -> None:
UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowerCAmelCase__ ):
schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : Dict ) -> None:
UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowerCAmelCase__ ):
schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
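# Editor's sketch (self-contained, illustrative): the determinant identity the
# first test above verifies. For the block matrix M = [[A, B], [B^T, C]] with
# A invertible, det(M) = det(A) * det(S), where S = C - B^T A^{-1} B is the
# Schur complement of A in M.
import numpy as np

_a = np.array([[2.0, 1.0], [1.0, 3.0]])
_b = np.array([[1.0], [0.0]])
_c = np.array([[4.0]])
_s = _c - _b.T @ np.linalg.inv(_a) @ _b
_m = np.block([[_a, _b], [_b.T, _c]])
assert np.isclose(np.linalg.det(_m), np.linalg.det(_a) * np.linalg.det(_s))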
| 1 | 0 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase__ = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def _UpperCamelCase ( self : int ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : List[Any] ) -> Dict:
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=0.9 , lowerCAmelCase__ : Tuple=3 , lowerCAmelCase__ : Optional[int]=0.5 ) -> Any:
if NLTK_VERSION >= version.Version("3.6.5" ):
UpperCAmelCase = [
meteor_score.single_meteor_score(
word_tokenize(lowerCAmelCase__ ) , word_tokenize(lowerCAmelCase__ ) , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
else:
UpperCAmelCase = [
meteor_score.single_meteor_score(lowerCAmelCase__ , lowerCAmelCase__ , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
return {"meteor": np.mean(lowerCAmelCase__ )}
| 711 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def _lowerCAmelCase( __A ):
UpperCAmelCase = fname.split(os.path.sep )[-1]
return re.search(r"^(.*)_\d+\.jpg$" , __A ).groups()[0]
class __magic_name__ ( _snake_case ):
def __init__( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : int=None ) -> Optional[Any]:
UpperCAmelCase = file_names
UpperCAmelCase = image_transform
UpperCAmelCase = label_to_id
def __len__( self : Tuple ) -> List[str]:
return len(self.file_names )
def __getitem__( self : Optional[int] , lowerCAmelCase__ : Tuple ) -> Dict:
UpperCAmelCase = self.file_names[idx]
UpperCAmelCase = PIL.Image.open(lowerCAmelCase__ )
UpperCAmelCase = raw_image.convert("RGB" )
if self.image_transform is not None:
UpperCAmelCase = self.image_transform(lowerCAmelCase__ )
UpperCAmelCase = extract_label(lowerCAmelCase__ )
if self.label_to_id is not None:
UpperCAmelCase = self.label_to_id[label]
return {"image": image, "label": label}
def _lowerCAmelCase( __A , __A ):
# Initialize accelerator
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config["lr"]
UpperCAmelCase = int(config["num_epochs"] )
UpperCAmelCase = int(config["seed"] )
UpperCAmelCase = int(config["batch_size"] )
UpperCAmelCase = config["image_size"]
if not isinstance(__A , (list, tuple) ):
UpperCAmelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , "isdigit" ):
if args.checkpointing_steps == "epoch":
UpperCAmelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
UpperCAmelCase = int(args.checkpointing_steps )
else:
raise ValueError(
F"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." )
else:
UpperCAmelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
UpperCAmelCase = os.path.split(__A )[-1].split("." )[0]
accelerator.init_trackers(__A , __A )
# Grab all the image filenames
UpperCAmelCase = [os.path.join(args.data_dir , __A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
UpperCAmelCase = [extract_label(__A ) for fname in file_names]
UpperCAmelCase = list(set(__A ) )
id_to_label.sort()
UpperCAmelCase = {lbl: i for i, lbl in enumerate(__A )}
# Set the seed before splitting the data.
np.random.seed(__A )
torch.manual_seed(__A )
torch.cuda.manual_seed_all(__A )
# Split our filenames between train and validation
UpperCAmelCase = np.random.permutation(len(__A ) )
UpperCAmelCase = int(0.8 * len(__A ) )
UpperCAmelCase = random_perm[:cut]
UpperCAmelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
UpperCAmelCase = Compose([RandomResizedCrop(__A , scale=(0.5, 1.0) ), ToTensor()] )
UpperCAmelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=__A , label_to_id=__A )
# For evaluation, we use a deterministic Resize
UpperCAmelCase = Compose([Resize(__A ), ToTensor()] )
UpperCAmelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=__A , label_to_id=__A )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = create_model("resnet50d" , pretrained=__A , num_classes=len(__A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
UpperCAmelCase = False
for param in model.get_classifier().parameters():
UpperCAmelCase = True
# We normalize the batches of images to be a bit faster.
UpperCAmelCase = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
UpperCAmelCase = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
UpperCAmelCase = OneCycleLR(optimizer=__A , max_lr=__A , epochs=__A , steps_per_epoch=len(__A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
__A , __A , __A , __A , __A )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase = 0
# We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
accelerator.print(F"Resumed from checkpoint: {args.resume_from_checkpoint}" )
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
UpperCAmelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
UpperCAmelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
UpperCAmelCase = os.path.splitext(__A )[0]
if "epoch" in training_difference:
UpperCAmelCase = int(training_difference.replace("epoch_" , "" ) ) + 1
UpperCAmelCase = None
else:
UpperCAmelCase = int(training_difference.replace("step_" , "" ) )
UpperCAmelCase = resume_step // len(__A )
resume_step -= starting_epoch * len(__A )
# Now we train the model
for epoch in range(__A , __A ):
model.train()
if args.with_tracking:
UpperCAmelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
UpperCAmelCase = accelerator.skip_first_batches(__A , __A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
UpperCAmelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch["image"] - mean) / std
UpperCAmelCase = model(__A )
UpperCAmelCase = torch.nn.functional.cross_entropy(__A , batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__A , __A ):
UpperCAmelCase = F"step_{overall_step}"
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
model.eval()
UpperCAmelCase = 0
UpperCAmelCase = 0
for step, batch in enumerate(__A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch["image"] - mean) / std
with torch.no_grad():
UpperCAmelCase = model(__A )
UpperCAmelCase = outputs.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["label"]) )
UpperCAmelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
UpperCAmelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}: {100 * eval_metric:.2f}" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(__A ),
"epoch": epoch,
} , step=__A , )
if checkpointing_steps == "epoch":
UpperCAmelCase = F"epoch_{epoch}"
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
if args.with_tracking:
accelerator.end_training()
def _lowerCAmelCase( ):
UpperCAmelCase = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir" , required=__A , help="The data folder on disk." )
parser.add_argument("--fp16" , action="store_true" , help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision" , type=__A , default=__A , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps" , type=__A , default=__A , help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." , )
parser.add_argument(
"--output_dir" , type=__A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=__A , default=__A , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=__A , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(__A , __A )
if __name__ == "__main__":
main()
| 1 | 0 |
def _lowerCAmelCase( __A , __A , __A ):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(__A , n - 1 , __A ) * a) % mod
else:
        UpperCAmelCase = binary_exponentiation(__A , n // 2 , __A )
return (b * b) % mod
# a prime number
lowerCAmelCase__ = 701
lowerCAmelCase__ = 1000000000
lowerCAmelCase__ = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
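# Editor's sketch (self-contained, illustrative): an iterative variant of the
# same square-and-multiply idea, checked against Python's built-in three-arg
# pow(), which also computes (base ** exp) % mod efficiently.
def binary_exponentiation_iterative(base, exp, mod):
    result = 1
    base %= mod
    while exp > 0:
        if exp & 1:  # multiply in the current bit of the exponent
            result = (result * base) % mod
        base = (base * base) % mod
        exp >>= 1
    return result

assert binary_exponentiation_iterative(10, 699, 701) == pow(10, 699, 701)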
| 712 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
lowerCAmelCase__ = ""
lowerCAmelCase__ = ""
lowerCAmelCase__ = ""
lowerCAmelCase__ = 1 # (0 is vertical, 1 is horizontal)
def _lowerCAmelCase( ):
UpperCAmelCase , UpperCAmelCase = get_dataset(__A , __A )
print("Processing..." )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = update_image_and_anno(__A , __A , __A )
for index, image in enumerate(__A ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
UpperCAmelCase = random_chars(32 )
UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0]
UpperCAmelCase = F"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
cva.imwrite(F"/{file_root}.jpg" , __A , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"Success {index+1}/{len(__A )} with {file_name}" )
UpperCAmelCase = []
for anno in new_annos[index]:
UpperCAmelCase = F"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
annos_list.append(__A )
with open(F"/{file_root}.txt" , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def _lowerCAmelCase( __A , __A ):
UpperCAmelCase = []
UpperCAmelCase = []
for label_file in glob.glob(os.path.join(__A , "*.txt" ) ):
UpperCAmelCase = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(__A ) as in_file:
UpperCAmelCase = in_file.readlines()
UpperCAmelCase = os.path.join(__A , F"{label_name}.jpg" )
UpperCAmelCase = []
for obj_list in obj_lists:
UpperCAmelCase = obj_list.rstrip("\n" ).split(" " )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__A )
labels.append(__A )
return img_paths, labels
def _lowerCAmelCase( __A , __A , __A = 1 ):
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = []
for idx in range(len(__A ) ):
UpperCAmelCase = []
UpperCAmelCase = img_list[idx]
path_list.append(__A )
UpperCAmelCase = anno_list[idx]
UpperCAmelCase = cva.imread(__A )
if flip_type == 1:
UpperCAmelCase = cva.flip(__A , __A )
for bbox in img_annos:
UpperCAmelCase = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
UpperCAmelCase = cva.flip(__A , __A )
for bbox in img_annos:
UpperCAmelCase = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__A )
new_imgs_list.append(__A )
return new_imgs_list, new_annos_lists, path_list
def _lowerCAmelCase( __A = 32 ):
    assert number_char > 1, "The number of characters should be greater than 1"
UpperCAmelCase = ascii_lowercase + digits
return "".join(random.choice(__A ) for _ in range(__A ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 1 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """blip_2_vision_model"""
def __init__( self : Union[str, Any] , lowerCAmelCase__ : Dict=1_4_0_8 , lowerCAmelCase__ : Optional[int]=6_1_4_4 , lowerCAmelCase__ : Union[str, Any]=3_9 , lowerCAmelCase__ : str=1_6 , lowerCAmelCase__ : Dict=2_2_4 , lowerCAmelCase__ : str=1_4 , lowerCAmelCase__ : int="gelu" , lowerCAmelCase__ : Optional[int]=0.00_001 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : List[str]=1e-1_0 , lowerCAmelCase__ : List[str]=True , **lowerCAmelCase__ : List[str] , ) -> int:
super().__init__(**lowerCAmelCase__ )
UpperCAmelCase = hidden_size
UpperCAmelCase = intermediate_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = patch_size
UpperCAmelCase = image_size
UpperCAmelCase = initializer_range
UpperCAmelCase = attention_dropout
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = hidden_act
UpperCAmelCase = qkv_bias
@classmethod
def _UpperCamelCase ( cls : Optional[Any] , lowerCAmelCase__ : Union[str, os.PathLike] , **lowerCAmelCase__ : Union[str, Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowerCAmelCase__ )
UpperCAmelCase , UpperCAmelCase = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
UpperCAmelCase = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """blip_2_qformer"""
def __init__( self : Any , lowerCAmelCase__ : Dict=3_0_5_2_2 , lowerCAmelCase__ : str=7_6_8 , lowerCAmelCase__ : Union[str, Any]=1_2 , lowerCAmelCase__ : Any=1_2 , lowerCAmelCase__ : Tuple=3_0_7_2 , lowerCAmelCase__ : Any="gelu" , lowerCAmelCase__ : Union[str, Any]=0.1 , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : Optional[Any]=5_1_2 , lowerCAmelCase__ : List[str]=0.02 , lowerCAmelCase__ : Union[str, Any]=1e-1_2 , lowerCAmelCase__ : Dict=0 , lowerCAmelCase__ : Optional[int]="absolute" , lowerCAmelCase__ : Union[str, Any]=2 , lowerCAmelCase__ : List[str]=1_4_0_8 , **lowerCAmelCase__ : Tuple , ) -> Union[str, Any]:
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = position_embedding_type
UpperCAmelCase = cross_attention_frequency
UpperCAmelCase = encoder_hidden_size
@classmethod
def _UpperCamelCase ( cls : List[str] , lowerCAmelCase__ : Union[str, os.PathLike] , **lowerCAmelCase__ : Dict ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowerCAmelCase__ )
UpperCAmelCase , UpperCAmelCase = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
UpperCAmelCase = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """blip-2"""
UpperCAmelCase = True
def __init__( self : List[Any] , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Tuple=3_2 , **lowerCAmelCase__ : Any ) -> Optional[int]:
super().__init__(**lowerCAmelCase__ )
if vision_config is None:
UpperCAmelCase = {}
logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." )
if qformer_config is None:
UpperCAmelCase = {}
logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." )
if text_config is None:
UpperCAmelCase = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
UpperCAmelCase = BlipaVisionConfig(**lowerCAmelCase__ )
UpperCAmelCase = BlipaQFormerConfig(**lowerCAmelCase__ )
UpperCAmelCase = text_config["model_type"] if "model_type" in text_config else "opt"
UpperCAmelCase = CONFIG_MAPPING[text_model_type](**lowerCAmelCase__ )
UpperCAmelCase = self.text_config.tie_word_embeddings
UpperCAmelCase = self.text_config.is_encoder_decoder
UpperCAmelCase = num_query_tokens
UpperCAmelCase = self.vision_config.hidden_size
UpperCAmelCase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCAmelCase = 1.0
UpperCAmelCase = 0.02
@classmethod
def _UpperCamelCase ( cls : Optional[Any] , lowerCAmelCase__ : BlipaVisionConfig , lowerCAmelCase__ : BlipaQFormerConfig , lowerCAmelCase__ : PretrainedConfig , **lowerCAmelCase__ : int , ) -> str:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCAmelCase__ , )
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
UpperCAmelCase = copy.deepcopy(self.__dict__ )
UpperCAmelCase = self.vision_config.to_dict()
UpperCAmelCase = self.qformer_config.to_dict()
UpperCAmelCase = self.text_config.to_dict()
UpperCAmelCase = self.__class__.model_type
return output
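# Editor's sketch (hypothetical usage; assumes the upstream `transformers`
# package, where these classes are exported as Blip2Config, Blip2VisionConfig
# and Blip2QFormerConfig): a composite config built from its three sub-configs.
from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig

blip2_config = Blip2Config.from_vision_qformer_text_configs(
    vision_config=Blip2VisionConfig(),
    qformer_config=Blip2QFormerConfig(),
    text_config=OPTConfig(),
)
assert blip2_config.num_query_tokens == 32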
| 713 |
def _lowerCAmelCase( __A ):
if not isinstance(__A , __A ):
raise TypeError("only integers accepted as input" )
else:
UpperCAmelCase = str(abs(__A ) )
UpperCAmelCase = [list(__A ) for char in range(len(__A ) )]
for index in range(len(__A ) ):
num_transpositions[index].pop(__A )
return max(
int("".join(list(__A ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 1 | 0 |
import torch
from diffusers import StableDiffusionPipeline
lowerCAmelCase__ = "path-to-your-trained-model"
lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
lowerCAmelCase__ = "A photo of sks dog in a bucket"
lowerCAmelCase__ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 714 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = 50 # max width of layer names
lowerCAmelCase__ = 70 # max width of quantizer names
def _lowerCAmelCase( __A ):
UpperCAmelCase = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=__A , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=__A , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=__A , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=__A , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=__A , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=__A , type=__A , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=__A , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def _lowerCAmelCase( __A ):
if args.calibrator == "max":
UpperCAmelCase = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
UpperCAmelCase = "histogram"
elif args.calibrator == "mse":
UpperCAmelCase = "histogram"
else:
raise ValueError(F"Invalid calibrator {args.calibrator}" )
UpperCAmelCase = QuantDescriptor(num_bits=args.aprec , calib_method=__A )
UpperCAmelCase = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(__A )
quant_nn.QuantLinear.set_default_quant_desc_weight(__A )
def _lowerCAmelCase( __A , __A , __A=False , __A=False ):
logger.info("Configuring Model for Quantization" )
logger.info(F"using quantization package {pytorch_quantization.__file__}" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(__A , ["embeddings"] , which="weight" , _disabled=__A )
if args.quant_disable:
set_quantizer_by_name(__A , [""] , _disabled=__A )
if args.quant_disable_keyword:
set_quantizer_by_name(__A , args.quant_disable_keyword , _disabled=__A )
if args.quant_disable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=__A )
if args.quant_enable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=__A )
if args.recalibrate_weights:
recalibrate_weights(__A )
if args.fuse_qkv:
fuse_qkv(__A , __A )
if args.clip_gelu:
clip_gelu(__A , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(__A )
def _lowerCAmelCase( __A ):
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"{name:80}: {module}" )
def _lowerCAmelCase( __A , __A ):
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(__A )
def _lowerCAmelCase( __A , __A ):
def fusea(__A , __A , __A ):
for mod in [qq, qk, qv]:
if not hasattr(__A , "_amax" ):
print(" WARNING: NO AMAX BUFFER" )
return
UpperCAmelCase = qq._amax.detach().item()
UpperCAmelCase = qk._amax.detach().item()
UpperCAmelCase = qv._amax.detach().item()
UpperCAmelCase = max(__A , __A , __A )
qq._amax.fill_(__A )
qk._amax.fill_(__A )
qv._amax.fill_(__A )
logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(F"FUSE_QKV: {name:{name_width}}" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _lowerCAmelCase( __A , __A ):
for name, mod in model.named_modules():
if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=__A )
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
UpperCAmelCase = mod.weight.shape[0]
UpperCAmelCase = mod._weight_quantizer._amax.detach()
UpperCAmelCase = torch.ones(__A , dtype=amax.dtype , device=amax.device ) * amax
print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ):
            if not hasattr(mod._weight_quantizer , "_amax" ):
                print(F"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
UpperCAmelCase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
UpperCAmelCase = set(range(len(mod.weight.size() ) ) ) - axis_set
UpperCAmelCase = pytorch_quantization.utils.reduce_amax(mod.weight , axis=__A , keepdims=__A ).detach()
logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
UpperCAmelCase = amax
def _lowerCAmelCase( __A , __A=25 , __A=180 , __A=None ):
if ignore is None:
UpperCAmelCase = []
elif not isinstance(__A , __A ):
UpperCAmelCase = [ignore]
UpperCAmelCase = 0
for name, mod in model.named_modules():
if not hasattr(__A , "weight" ):
continue
UpperCAmelCase = max(__A , len(__A ) )
for name, mod in model.named_modules():
UpperCAmelCase = getattr(__A , "_input_quantizer" , __A )
UpperCAmelCase = getattr(__A , "_weight_quantizer" , __A )
if not hasattr(__A , "weight" ):
continue
if type(__A ) in ignore:
continue
if [True for s in ignore if type(__A ) is str and s in name]:
continue
UpperCAmelCase = F"Act:{input_q.extra_repr()}"
UpperCAmelCase = F"Wgt:{weight_q.extra_repr()}"
UpperCAmelCase = F"{name:{name_width}} {act_str} {wgt_str}"
if len(__A ) <= line_width:
logger.info(__A )
else:
logger.info(F"{name:{name_width}} {act_str}" )
logger.info(F"{' ':{name_width}} {wgt_str}" )
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
for name, mod in model.named_modules():
if isinstance(__A , pytorch_quantization.nn.TensorQuantizer ):
print(F"{name:80} {mod}" )
count += 1
print(F"{count} TensorQuantizers found in model" )
def _lowerCAmelCase( __A , __A , __A , __A , __A ):
UpperCAmelCase = getattr(__A , __A , __A )
if quantizer_mod is not None:
assert hasattr(__A , __A )
setattr(__A , __A , __A )
else:
logger.warning(F"{name} has no {quantizer}" )
def _lowerCAmelCase( __A , __A , __A="both" , **__A ):
UpperCAmelCase = F"Warning: changing {which} quantizers of {name:{qname_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
if which in ["input", "both"]:
set_quantizer(__A , __A , "_input_quantizer" , __A , __A )
if which in ["weight", "both"]:
set_quantizer(__A , __A , "_weight_quantizer" , __A , __A )
logger.info(__A )
def _lowerCAmelCase( __A , __A , **__A ):
for name, mod in model.named_modules():
if hasattr(__A , "_input_quantizer" ) or hasattr(__A , "_weight_quantizer" ):
for n in names:
if re.search(__A , __A ):
set_quantizers(__A , __A , **__A )
elif name.endswith("_quantizer" ):
for n in names:
if re.search(__A , __A ):
UpperCAmelCase = F"Warning: changing {name:{name_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
setattr(__A , __A , __A )
logger.info(__A )
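# Editor's sketch (self-contained, plain-torch illustration of the fuse_qkv
# logic above): giving the Q, K and V quantizers the same clipping range
# amounts to taking the max of their calibrated amax values.
import torch

def _fuse_amax_demo(q_amax, k_amax, v_amax):
    fused = max(q_amax.item(), k_amax.item(), v_amax.item())
    for t in (q_amax, k_amax, v_amax):
        t.fill_(fused)
    return fused

_amaxes = [torch.tensor(x) for x in (1.5, 2.25, 0.75)]
assert _fuse_amax_demo(*_amaxes) == 2.25
assert all(t.item() == 2.25 for t in _amaxes)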
| 1 | 0 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __magic_name__ :
def __init__( self : Optional[int] ) -> Optional[Any]:
UpperCAmelCase = ""
UpperCAmelCase = ""
UpperCAmelCase = []
UpperCAmelCase = 0
UpperCAmelCase = 2_5_6
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : Optional[Any] ) -> List[str]:
UpperCAmelCase = cva.imread(lowerCAmelCase__ , 0 )
UpperCAmelCase = copy.deepcopy(self.img )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="x" )
UpperCAmelCase = np.sum(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
UpperCAmelCase = x[i] / self.k
self.sk += prk
UpperCAmelCase = (self.L - 1) * self.sk
if self.rem != 0:
                UpperCAmelCase = last % 1  # keep only the fractional part of `last` for the rounding below
UpperCAmelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowerCAmelCase__ )
UpperCAmelCase = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def _UpperCamelCase ( self : str ) -> int:
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
    lowerCAmelCase__ = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
lowerCAmelCase__ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
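# Editor's sketch (self-contained numpy version of the mapping that `stretch`
# above computes pixel by pixel): histogram equalization builds the cumulative
# distribution of grey levels and rescales it to [0, L-1].
import numpy as np

def equalize_demo(img, levels=256):
    hist, _ = np.histogram(img.ravel(), bins=levels, range=(0, levels))
    cdf = hist.cumsum() / img.size
    lut = np.round((levels - 1) * cdf).astype(np.uint8)  # grey-level lookup table
    return lut[img]

_demo = np.array([[0, 64], [128, 255]], dtype=np.uint8)
print(equalize_demo(_demo))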
| 715 |
def _lowerCAmelCase( __A ):
assert column_title.isupper()
UpperCAmelCase = 0
UpperCAmelCase = len(__A ) - 1
UpperCAmelCase = 0
while index >= 0:
UpperCAmelCase = (ord(column_title[index] ) - 64) * pow(26 , __A )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
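# Editor's sketch (self-contained complement to the function above): the
# inverse mapping, from a 1-based column number back to its Excel-style title.
def excel_column_title(column_number):
    title = ""
    while column_number > 0:
        column_number, rem = divmod(column_number - 1, 26)
        title = chr(65 + rem) + title
    return title

assert excel_column_title(1) == "A"
assert excel_column_title(27) == "AA"
assert excel_column_title(701) == "ZY"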
| 1 | 0 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
lowerCAmelCase__ = pytest.mark.integration
lowerCAmelCase__ = {"comet"}
lowerCAmelCase__ = importlib.util.find_spec("fairseq") is not None
lowerCAmelCase__ = {"code_eval"}
lowerCAmelCase__ = os.name == "nt"
lowerCAmelCase__ = {"bertscore", "frugalscore", "perplexity"}
lowerCAmelCase__ = importlib.util.find_spec("transformers") is not None
def _lowerCAmelCase( __A ):
@wraps(__A )
def wrapper(self , __A ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self , __A )
return wrapper
def _lowerCAmelCase( __A ):
@wraps(__A )
def wrapper(self , __A ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self , __A )
return wrapper
def _lowerCAmelCase( __A ):
@wraps(__A )
def wrapper(self , __A ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self , __A )
return wrapper
def _lowerCAmelCase( ):
UpperCAmelCase = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
_snake_case , _snake_case , _snake_case )
@local
class __magic_name__ ( parameterized.TestCase ):
UpperCAmelCase = {}
UpperCAmelCase = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def _UpperCamelCase ( self : List[str] , lowerCAmelCase__ : Tuple ) -> Dict:
UpperCAmelCase = "[...]"
UpperCAmelCase = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , lowerCAmelCase__ ) ).module_path )
UpperCAmelCase = datasets.load.import_main_class(metric_module.__name__ , dataset=lowerCAmelCase__ )
# check parameters
UpperCAmelCase = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(lowerCAmelCase__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
UpperCAmelCase = doctest.testmod(lowerCAmelCase__ , verbose=lowerCAmelCase__ , raise_on_error=lowerCAmelCase__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def _UpperCamelCase ( self : str , lowerCAmelCase__ : Optional[int] ) -> str:
UpperCAmelCase = "[...]"
UpperCAmelCase = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , lowerCAmelCase__ ) ).module_path )
# run doctest
with self.use_local_metrics():
UpperCAmelCase = doctest.testmod(lowerCAmelCase__ , verbose=lowerCAmelCase__ , raise_on_error=lowerCAmelCase__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] ) -> Dict:
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](lowerCAmelCase__ ):
yield
else:
yield
@contextmanager
def _UpperCamelCase ( self : List[str] ) -> int:
def load_local_metric(lowerCAmelCase__ : Tuple , *lowerCAmelCase__ : str , **lowerCAmelCase__ : Dict ):
return load_metric(os.path.join("metrics" , lowerCAmelCase__ ) , *lowerCAmelCase__ , **lowerCAmelCase__ )
with patch("datasets.load_metric" ) as mock_load_metric:
UpperCAmelCase = load_local_metric
yield
@classmethod
def _UpperCamelCase ( cls : List[Any] , lowerCAmelCase__ : Any ) -> str:
def wrapper(lowerCAmelCase__ : List[Any] ):
UpperCAmelCase = contextmanager(lowerCAmelCase__ )
UpperCAmelCase = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def _lowerCAmelCase( __A ):
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
class __magic_name__ ( _snake_case ):
def _UpperCamelCase ( self : Tuple , lowerCAmelCase__ : Any ) -> int:
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
UpperCAmelCase = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def _lowerCAmelCase( __A ):
import torch
def bert_cos_score_idf(__A , __A , *__A , **__A ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(__A ) )
    # mock get_model, which would otherwise download a bert model
    # mock bert_cos_score_idf, which would otherwise do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
UpperCAmelCase = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def _lowerCAmelCase( __A ):
def load_from_checkpoint(__A ):
class __magic_name__ :
def _UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : Optional[int] , *lowerCAmelCase__ : Dict , **lowerCAmelCase__ : str ) -> Dict:
assert len(lowerCAmelCase__ ) == 2
UpperCAmelCase = [0.19, 0.92]
return scores, sum(lowerCAmelCase__ ) / len(lowerCAmelCase__ )
return Model()
    # mock download_model and load_from_checkpoint, which would otherwise download a comet model
with patch("comet.download_model" ) as mock_download_model:
UpperCAmelCase = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
UpperCAmelCase = load_from_checkpoint
yield
def _lowerCAmelCase( ):
UpperCAmelCase = load_metric(os.path.join("metrics" , "seqeval" ) )
UpperCAmelCase = "ERROR"
UpperCAmelCase = F"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
with pytest.raises(__A , match=re.escape(__A ) ):
metric.compute(predictions=[] , references=[] , scheme=__A )
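# Editor's sketch (self-contained illustration of the patcher pattern above):
# a context manager built with unittest.mock.patch can swap an expensive model
# call for a stub while a metric's doctests run.
from contextlib import contextmanager as _contextmanager_demo
from unittest.mock import patch as _patch_demo

@_contextmanager_demo
def _patch_expensive_call_demo():
    with _patch_demo("math.sqrt", return_value=1.0) as mocked:
        yield mocked

with _patch_expensive_call_demo() as _mocked:
    import math
    assert math.sqrt(1_000_000) == 1.0  # the stub answers instead of the real call
assert _mocked.called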
| 716 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase__ = get_tests_dir("fixtures")
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase = mock.Mock()
UpperCAmelCase = 5_0_0
UpperCAmelCase = {}
UpperCAmelCase = HTTPError
UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=lowerCAmelCase__ ) as mock_head:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# This check we did call the fake head request
mock_head.assert_called()
def _UpperCamelCase ( self : List[Any] ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class __magic_name__ ( unittest.TestCase ):
@classmethod
def _UpperCamelCase ( cls : List[str] ) -> List[Any]:
UpperCAmelCase = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def _UpperCamelCase ( cls : Optional[int] ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def _UpperCamelCase ( self : Any ) -> Any:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase__ , repo_id="test-feature-extractor" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def _UpperCamelCase ( self : List[Any] ) -> Tuple:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase__ , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def _UpperCamelCase ( self : Dict ) -> List[str]:
CustomFeatureExtractor.register_for_auto_class()
UpperCAmelCase = CustomFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=lowerCAmelCase__ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
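# Editor's sketch (hypothetical usage with the upstream `transformers` name
# Wav2Vec2FeatureExtractor; the repo id is a placeholder): the push/load round
# trip these tests exercise.
#
#   from transformers import Wav2Vec2FeatureExtractor
#
#   fe = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
#   fe.push_to_hub("my-user/test-feature-extractor")  # needs a write token
#   fe_roundtrip = Wav2Vec2FeatureExtractor.from_pretrained("my-user/test-feature-extractor")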
| 1 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
lowerCAmelCase__ = logging.get_logger(__name__)
@dataclass
class __magic_name__ :
def __init__( self : Union[str, Any] , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : str=6.0 , lowerCAmelCase__ : int=None , lowerCAmelCase__ : List[Any]=False , lowerCAmelCase__ : Any=False , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Any="fp4" , lowerCAmelCase__ : List[Any]=False , **lowerCAmelCase__ : Any , ) -> Optional[int]:
UpperCAmelCase = load_in_abit
UpperCAmelCase = load_in_abit
UpperCAmelCase = llm_inta_threshold
UpperCAmelCase = llm_inta_skip_modules
UpperCAmelCase = llm_inta_enable_fpaa_cpu_offload
UpperCAmelCase = llm_inta_has_fpaa_weight
UpperCAmelCase = bnb_abit_quant_type
UpperCAmelCase = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
UpperCAmelCase = torch.floataa
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
elif isinstance(lowerCAmelCase__ , torch.dtype ):
UpperCAmelCase = bnb_abit_compute_dtype
else:
raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype" )
self.post_init()
def _UpperCamelCase ( self : str ) -> int:
if not isinstance(self.llm_inta_threshold , lowerCAmelCase__ ):
raise ValueError("llm_int8_threshold must be a float" )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , lowerCAmelCase__ ):
raise ValueError("llm_int8_skip_modules must be a list of strings" )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , lowerCAmelCase__ ):
raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean" )
if not isinstance(self.llm_inta_has_fpaa_weight , lowerCAmelCase__ ):
raise ValueError("llm_int8_has_fp16_weight must be a boolean" )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError("bnb_4bit_compute_dtype must be torch.dtype" )
if not isinstance(self.bnb_abit_quant_type , lowerCAmelCase__ ):
raise ValueError("bnb_4bit_quant_type must be a string" )
if not isinstance(self.bnb_abit_use_double_quant , lowerCAmelCase__ ):
raise ValueError("bnb_4bit_use_double_quant must be a boolean" )
if self.load_in_abit and not version.parse(importlib.metadata.version("bitsandbytes" ) ) >= version.parse(
"0.39.0" ):
raise ValueError(
"4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version" )
def _UpperCamelCase ( self : str ) -> Tuple:
return self.load_in_abit or self.load_in_abit
def _UpperCamelCase ( self : str ) -> List[str]:
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def _UpperCamelCase ( cls : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : Any ) -> List[str]:
UpperCAmelCase = cls(**lowerCAmelCase__ )
UpperCAmelCase = []
for key, value in kwargs.items():
if hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
setattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
to_remove.append(lowerCAmelCase__ )
for key in to_remove:
kwargs.pop(lowerCAmelCase__ , lowerCAmelCase__ )
if return_unused_kwargs:
return config, kwargs
else:
return config
def _UpperCamelCase ( self : Tuple , lowerCAmelCase__ : Union[str, os.PathLike] ) -> Optional[int]:
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as writer:
UpperCAmelCase = self.to_dict()
UpperCAmelCase = json.dumps(lowerCAmelCase__ , indent=2 , sort_keys=lowerCAmelCase__ ) + "\n"
writer.write(lowerCAmelCase__ )
def _UpperCamelCase ( self : List[Any] ) -> Dict[str, Any]:
UpperCAmelCase = copy.deepcopy(self.__dict__ )
UpperCAmelCase = str(output["bnb_4bit_compute_dtype"] ).split("." )[1]
return output
def __repr__( self : Union[str, Any] ) -> int:
return f"{self.__class__.__name__} {self.to_json_string()}"
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : bool = True ) -> str:
if use_diff is True:
UpperCAmelCase = self.to_diff_dict()
else:
UpperCAmelCase = self.to_dict()
return json.dumps(lowerCAmelCase__ , indent=2 , sort_keys=lowerCAmelCase__ ) + "\n"
def _UpperCamelCase ( self : int ) -> Dict[str, Any]:
UpperCAmelCase = self.to_dict()
# get the default config dict
UpperCAmelCase = BitsAndBytesConfig().to_dict()
UpperCAmelCase = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
UpperCAmelCase = value
return serializable_config_dict
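# Editor's sketch (hypothetical usage; assumes the upstream `transformers` API,
# where this class is exported as BitsAndBytesConfig, plus bitsandbytes >= 0.39.0
# and a CUDA device):
#
#   import torch
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   quant_config = BitsAndBytesConfig(
#       load_in_4bit=True,
#       bnb_4bit_quant_type="nf4",
#       bnb_4bit_use_double_quant=True,
#       bnb_4bit_compute_dtype=torch.bfloat16,
#   )
#   model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=quant_config)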
| 717 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowerCAmelCase__ = "src/diffusers"
# Matches is_xxx_available()
lowerCAmelCase__ = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
lowerCAmelCase__ = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
lowerCAmelCase__ = "\n{0} = None\n"
lowerCAmelCase__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
lowerCAmelCase__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def _lowerCAmelCase( __A ):
UpperCAmelCase = _re_backend.findall(__A )
if len(__A ) == 0:
return None
return "_and_".join(__A )
def _lowerCAmelCase( ):
with open(os.path.join(__A , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.readlines()
# Get to the point we do the actual imports for type checking
UpperCAmelCase = 0
UpperCAmelCase = {}
# Go through the end of the file
while line_index < len(__A ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCAmelCase = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("else:" ):
line_index += 1
line_index += 1
UpperCAmelCase = []
# Until we unindent, add backend objects to the list
while line_index < len(__A ) and len(lines[line_index] ) > 1:
UpperCAmelCase = lines[line_index]
UpperCAmelCase = _re_single_line_import.search(__A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__A ) > 0:
UpperCAmelCase = objects
else:
line_index += 1
return backend_specific_objects
def _lowerCAmelCase( __A , __A ):
if name.isupper():
return DUMMY_CONSTANT.format(__A )
elif name.islower():
return DUMMY_FUNCTION.format(__A , __A )
else:
return DUMMY_CLASS.format(__A , __A )
def _lowerCAmelCase( __A=None ):
if backend_specific_objects is None:
UpperCAmelCase = read_init()
    # Special-case correspondence from backend name to module name, as used in the requires_<modulename> helpers
UpperCAmelCase = {}
for backend, objects in backend_specific_objects.items():
UpperCAmelCase = "[" + ", ".join(F"\"{b}\"" for b in backend.split("_and_" ) ) + "]"
UpperCAmelCase = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__A , __A ) for o in objects] )
UpperCAmelCase = dummy_file
return dummy_files
def _lowerCAmelCase( __A=False ):
UpperCAmelCase = create_dummy_files()
    # Special-case correspondence from backend name to the short name used in utils/dummy_xxx_objects.py
UpperCAmelCase = {"torch": "pt"}
# Locate actual dummy modules and read their content.
UpperCAmelCase = os.path.join(__A , "utils" )
UpperCAmelCase = {
backend: os.path.join(__A , F"dummy_{short_names.get(__A , __A )}_objects.py" )
for backend in dummy_files.keys()
}
UpperCAmelCase = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__A ):
with open(__A , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.read()
else:
UpperCAmelCase = ""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F"Updating diffusers.utils.dummy_{short_names.get(__A , __A )}_objects.py as the main "
"__init__ has new objects." )
with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"The main __init__ has objects that are not present in "
F"diffusers.utils.dummy_{short_names.get(__A , __A )}_objects.py. Run `make fix-copies` "
"to fix this." )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCAmelCase__ = parser.parse_args()
check_dummies(args.fix_and_overwrite)
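    # Typical invocations (run from the repo root, as the header comment notes):
    #   python utils/check_dummies.py                      # report out-of-date dummy files
    #   python utils/check_dummies.py --fix_and_overwrite  # regenerate them in place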
| 1 | 0 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
lowerCAmelCase__ = "Usage of script: script_name <size_of_canvas:int>"
lowerCAmelCase__ = [0] * 100 + [1] * 10
random.shuffle(choice)
def _lowerCAmelCase( __A ):
UpperCAmelCase = [[False for i in range(__A )] for j in range(__A )]
return canvas
def _lowerCAmelCase( __A ):
for i, row in enumerate(__A ):
for j, _ in enumerate(__A ):
UpperCAmelCase = bool(random.getrandbits(1 ) )
def _lowerCAmelCase( __A ):
UpperCAmelCase = np.array(__A )
UpperCAmelCase = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__A ):
for c, pt in enumerate(__A ):
UpperCAmelCase = __judge_point(
__A , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
UpperCAmelCase = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
UpperCAmelCase = current_canvas.tolist()
return return_canvas
def _lowerCAmelCase( __A , __A ):
UpperCAmelCase = 0
UpperCAmelCase = 0
    # count the dead and alive neighbours.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
    # the focus point itself was counted as a neighbour above; remove it from the tally.
if pt:
alive -= 1
else:
dead -= 1
    # apply the rules of the game.
UpperCAmelCase = pt
if pt:
if alive < 2:
UpperCAmelCase = False
elif alive == 2 or alive == 3:
UpperCAmelCase = True
elif alive > 3:
UpperCAmelCase = False
else:
if alive == 3:
UpperCAmelCase = True
return state
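# The branches above encode Conway's standard B3/S23 rules: a live cell with
# fewer than two live neighbours dies (underpopulation), with two or three it
# survives, with more than three it dies (overpopulation), and a dead cell
# with exactly three live neighbours becomes alive.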
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
lowerCAmelCase__ = int(sys.argv[1])
# main working structure of this module.
lowerCAmelCase__ = create_canvas(canvas_size)
seed(c)
lowerCAmelCase__, lowerCAmelCase__ = plt.subplots()
fig.show()
lowerCAmelCase__ = ListedColormap(["w", "k"])
try:
while True:
lowerCAmelCase__ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 718 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class __magic_name__ ( _snake_case , _snake_case ):
UpperCAmelCase = """convnextv2"""
def __init__( self : Optional[Any] , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : Dict=4 , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : str="gelu" , lowerCAmelCase__ : Optional[int]=0.02 , lowerCAmelCase__ : Dict=1e-1_2 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : str=2_2_4 , lowerCAmelCase__ : int=None , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : List[Any] , ) -> List[Any]:
super().__init__(**lowerCAmelCase__ )
UpperCAmelCase = num_channels
UpperCAmelCase = patch_size
UpperCAmelCase = num_stages
UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes
UpperCAmelCase = [3, 3, 9, 3] if depths is None else depths
UpperCAmelCase = hidden_act
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = drop_path_rate
UpperCAmelCase = image_size
UpperCAmelCase = ["stem"] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )]
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names )
| 1 | 0 |
'''simple docstring'''
def _lowerCAmelCase( __A = 4000000 ):
UpperCAmelCase = []
UpperCAmelCase , UpperCAmelCase = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(__A )
UpperCAmelCase , UpperCAmelCase = b, a + b
return sum(__A )
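# Worked example: for n = 100 the even-valued terms are 2, 8 and 34, so the sum
# is 44; with the default limit of 4_000_000 this is Project Euler problem 2,
# whose answer is 4613732.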
if __name__ == "__main__":
print(f"{solution() = }")
| 719 |
lowerCAmelCase__ = "\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the latest release, comment out the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCAmelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCAmelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 1 | 0 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
def _lowerCAmelCase( __A ):
if isinstance(__A , np.ndarray ):
return list(tensor.shape )
UpperCAmelCase = tf.shape(__A )
if tensor.shape == tf.TensorShape(__A ):
return dynamic
UpperCAmelCase = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(__A )]
def _lowerCAmelCase( __A , __A = None , __A = None ):
return tf.nn.softmax(logits=logits + 1E-9 , axis=__A , name=__A )
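# The tiny 1e-9 added to the logits above does not meaningfully change the
# result; upstream this is documented as a workaround for a known XLA
# compilation bug in tf.nn.softmax on masked inputs.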
def _lowerCAmelCase( __A , __A , __A , __A=1E-5 , __A=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(__A , __A ):
raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." )
# Get mean and variance on the axis to be normalized
UpperCAmelCase , UpperCAmelCase = tf.nn.moments(__A , axes=[axis] , keepdims=__A )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
UpperCAmelCase = [1] * inputs.shape.rank
UpperCAmelCase = shape_list(__A )[axis]
UpperCAmelCase = tf.reshape(__A , __A )
UpperCAmelCase = tf.reshape(__A , __A )
# Compute layer normalization using the batch_normalization
# function.
UpperCAmelCase = tf.nn.batch_normalization(
__A , __A , __A , offset=__A , scale=__A , variance_epsilon=__A , )
return outputs
def _lowerCAmelCase( __A , __A=0 , __A=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
UpperCAmelCase = tf.shape(__A )
UpperCAmelCase = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
UpperCAmelCase = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(__A , __A )
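# Shape example: a (2, 3, 4) tensor flattened with start_dim=1, end_dim=-1
# becomes (2, 12), matching torch.flatten semantics.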
def _lowerCAmelCase( __A ):
if not isinstance(__A , tf.Tensor ):
UpperCAmelCase = tf.convert_to_tensor(__A ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
UpperCAmelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
UpperCAmelCase = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
UpperCAmelCase = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
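# The returned mask is additive: positions to keep map to 0 and masked
# positions map to the dtype's most negative value, so adding it to the
# attention scores effectively zeroes the masked positions after the softmax.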
def _lowerCAmelCase( __A , __A , __A = "input_ids" ):
tf.debugging.assert_less(
__A , tf.cast(__A , dtype=tensor.dtype ) , message=(
F"The maximum value of {tensor_name} ({tf.math.reduce_max(__A )}) must be smaller than the embedding "
F"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
) , )
def _lowerCAmelCase( __A , __A , __A ):
UpperCAmelCase = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
UpperCAmelCase = [x for x in data if len(__A ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"The following attributes cannot be saved to HDF5 file because "
F"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
F"bytes: {bad_attributes}" )
UpperCAmelCase = np.asarray(__A )
UpperCAmelCase = 1
UpperCAmelCase = np.array_split(__A , __A )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
UpperCAmelCase = np.array_split(__A , __A )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(__A ):
UpperCAmelCase = chunk_data
else:
UpperCAmelCase = data
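# Chunks are stored under numbered keys ("name0", "name1", ...), which is the
# naming scheme the loader below reassembles with "%s%d" % (name, chunk_id).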
def _lowerCAmelCase( __A , __A ):
if name in group.attrs:
UpperCAmelCase = [n.decode("utf8" ) if hasattr(__A , "decode" ) else n for n in group.attrs[name]]
else:
UpperCAmelCase = []
UpperCAmelCase = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("utf8" ) if hasattr(__A , "decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] )
chunk_id += 1
return data
def _lowerCAmelCase( __A ):
def _expand_single_ad_tensor(__A ):
if isinstance(__A , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(__A , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , __A )
| 720 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __magic_name__ ( _snake_case , unittest.TestCase ):
UpperCAmelCase = KandinskyInpaintPipeline
UpperCAmelCase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
UpperCAmelCase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
UpperCAmelCase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCAmelCase = False
@property
def _UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
return 3_2
@property
def _UpperCamelCase ( self : int ) -> List[Any]:
return 3_2
@property
def _UpperCamelCase ( self : List[Any] ) -> List[Any]:
return self.time_input_dim
@property
def _UpperCamelCase ( self : Tuple ) -> Tuple:
return self.time_input_dim * 4
@property
def _UpperCamelCase ( self : Any ) -> Optional[int]:
return 1_0_0
@property
def _UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
UpperCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def _UpperCamelCase ( self : int ) -> Dict:
torch.manual_seed(0 )
UpperCAmelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
UpperCAmelCase = MultilingualCLIP(lowerCAmelCase__ )
UpperCAmelCase = text_encoder.eval()
return text_encoder
@property
def _UpperCamelCase ( self : Dict ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCAmelCase = UNetaDConditionModel(**lowerCAmelCase__ )
return model
@property
def _UpperCamelCase ( self : str ) -> Optional[Any]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _UpperCamelCase ( self : Dict ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def _UpperCamelCase ( self : Tuple ) -> Any:
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = self.dummy_tokenizer
UpperCAmelCase = self.dummy_unet
UpperCAmelCase = self.dummy_movq
UpperCAmelCase = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , steps_offset=1 , prediction_type="epsilon" , thresholding=lowerCAmelCase__ , )
UpperCAmelCase = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple=0 ) -> str:
UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowerCAmelCase__ )
# create init_image
UpperCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
# create mask
UpperCAmelCase = np.ones((6_4, 6_4) , dtype=np.floataa )
UpperCAmelCase = 0
if str(lowerCAmelCase__ ).startswith("mps" ):
UpperCAmelCase = torch.manual_seed(lowerCAmelCase__ )
else:
UpperCAmelCase = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
UpperCAmelCase = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def _UpperCamelCase ( self : Dict ) -> List[str]:
UpperCAmelCase = "cpu"
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**lowerCAmelCase__ )
UpperCAmelCase = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
UpperCAmelCase = output.images
UpperCAmelCase = pipe(
**self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 6_4, 6_4, 3)
UpperCAmelCase = np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def _UpperCamelCase ( self : str ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : str ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : Tuple ) -> int:
UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
UpperCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCAmelCase = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
UpperCAmelCase = 0
UpperCAmelCase = "a hat"
UpperCAmelCase = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCAmelCase__ )
UpperCAmelCase = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
UpperCAmelCase = pipeline.to(lowerCAmelCase__ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase , UpperCAmelCase = pipe_prior(
lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCAmelCase = pipeline(
lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type="np" , )
UpperCAmelCase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
| 1 | 0 |
def _lowerCAmelCase( __A ):
if number > 0:
raise ValueError("input must be a negative integer" )
UpperCAmelCase = len(bin(__A )[3:] )
UpperCAmelCase = bin(abs(__A ) - (1 << binary_number_length) )[3:]
UpperCAmelCase = (
(
"1"
+ "0" * (binary_number_length - len(__A ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
def _lowerCAmelCase( __A , __A ):
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def _lowerCAmelCase( __A , __A=0 ):
return sorted(__A , key=lambda __A : x[column] )
def _lowerCAmelCase( __A , __A , __A=float("inf" ) ):
for i in range(points_counts - 1 ):
for j in range(i + 1 , __A ):
UpperCAmelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
UpperCAmelCase = current_dis
return min_dis
def _lowerCAmelCase( __A , __A , __A=float("inf" ) ):
for i in range(min(6 , points_counts - 1 ) , __A ):
for j in range(max(0 , i - 6 ) , __A ):
UpperCAmelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
UpperCAmelCase = current_dis
return min_dis
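# Checking only a constant window is safe: a packing argument shows that only
# a constant number of strip points (classically at most 7) can lie within the
# current minimum distance, hence the bound of 6 neighbours above.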
def _lowerCAmelCase( __A , __A , __A ):
# base case
if points_counts <= 3:
return dis_between_closest_pair(__A , __A )
# recursion
UpperCAmelCase = points_counts // 2
UpperCAmelCase = closest_pair_of_points_sqr(
__A , points_sorted_on_y[:mid] , __A )
UpperCAmelCase = closest_pair_of_points_sqr(
__A , points_sorted_on_y[mid:] , points_counts - mid )
UpperCAmelCase = min(__A , __A )
UpperCAmelCase = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(__A )
UpperCAmelCase = dis_between_closest_in_strip(
__A , len(__A ) , __A )
return min(__A , __A )
def _lowerCAmelCase( __A , __A ):
UpperCAmelCase = column_based_sort(__A , column=0 )
UpperCAmelCase = column_based_sort(__A , column=1 )
return (
closest_pair_of_points_sqr(
__A , __A , __A )
) ** 0.5
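# The two sorts cost O(n log n) and the recursion satisfies
# T(n) = 2 T(n / 2) + O(n), so the overall running time is O(n log n).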
if __name__ == "__main__":
lowerCAmelCase__ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 1 | 0 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __magic_name__ ( _snake_case , unittest.TestCase ):
UpperCAmelCase = VideoToVideoSDPipeline
UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
UpperCAmelCase = PipelineTesterMixin.required_optional_params - {"""latents"""}
UpperCAmelCase = False
# No `output_type`.
UpperCAmelCase = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def _UpperCamelCase ( self : str ) -> Dict:
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=3_2 , attention_head_dim=4 , )
UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , )
UpperCAmelCase = CLIPTextModel(lowerCAmelCase__ )
UpperCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _UpperCamelCase ( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict=0 ) -> Optional[int]:
# 3 frames
UpperCAmelCase = floats_tensor((1, 3, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
if str(lowerCAmelCase__ ).startswith("mps" ):
UpperCAmelCase = torch.manual_seed(lowerCAmelCase__ )
else:
UpperCAmelCase = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
UpperCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def _UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = VideoToVideoSDPipeline(**lowerCAmelCase__ )
UpperCAmelCase = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase = self.get_dummy_inputs(lowerCAmelCase__ )
UpperCAmelCase = "np"
UpperCAmelCase = sd_pipe(**lowerCAmelCase__ ).frames
UpperCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (3_2, 3_2, 3)
UpperCAmelCase = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _UpperCamelCase ( self : Tuple ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase__ , expected_max_diff=5e-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _UpperCamelCase ( self : str ) -> Tuple:
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def _UpperCamelCase ( self : List[str] ) -> Dict:
pass
def _UpperCamelCase ( self : List[str] ) -> int:
return super().test_progress_bar()
@slow
@skip_mps
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
UpperCAmelCase = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
UpperCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6) , generator=lowerCAmelCase__ )
UpperCAmelCase = video.to("cuda" )
UpperCAmelCase = "Spiderman is surfing"
UpperCAmelCase = pipe(lowerCAmelCase__ , video=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=3 , output_type="pt" ).frames
UpperCAmelCase = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 700 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __magic_name__ :
def __init__( self : Optional[int] ) -> Optional[Any]:
UpperCAmelCase = ""
UpperCAmelCase = ""
UpperCAmelCase = []
UpperCAmelCase = 0
UpperCAmelCase = 2_5_6
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : Optional[Any] ) -> List[str]:
UpperCAmelCase = cva.imread(lowerCAmelCase__ , 0 )
UpperCAmelCase = copy.deepcopy(self.img )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="x" )
UpperCAmelCase = np.sum(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
UpperCAmelCase = x[i] / self.k
self.sk += prk
UpperCAmelCase = (self.L - 1) * self.sk
        UpperCAmelCase = last % 1  # fractional part of `last`, so the level is rounded to the nearest integer below
UpperCAmelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowerCAmelCase__ )
UpperCAmelCase = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
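        # The remapping above is standard histogram equalization: each grey
        # level k is sent to round((L - 1) * CDF(k)), with L = 256 and CDF the
        # normalized cumulative histogram accumulated in self.sk.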
def _UpperCamelCase ( self : str ) -> int:
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
lowerCAmelCase__ = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
lowerCAmelCase__ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 1 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __magic_name__ ( unittest.TestCase ):
def __init__( self : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any]=7 , lowerCAmelCase__ : List[str]=3 , lowerCAmelCase__ : List[str]=1_8 , lowerCAmelCase__ : Union[str, Any]=3_0 , lowerCAmelCase__ : List[str]=4_0_0 , lowerCAmelCase__ : int=True , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : str=True , ) -> List[Any]:
UpperCAmelCase = size if size is not None else {"height": 1_8, "width": 1_8}
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = num_channels
UpperCAmelCase = image_size
UpperCAmelCase = min_resolution
UpperCAmelCase = max_resolution
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = apply_ocr
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __magic_name__ ( _snake_case , unittest.TestCase ):
UpperCAmelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _UpperCamelCase ( self : int ) -> Union[str, Any]:
UpperCAmelCase = LayoutLMvaImageProcessingTester(self )
@property
def _UpperCamelCase ( self : int ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase ( self : Any ) -> str:
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "apply_ocr" ) )
def _UpperCamelCase ( self : str ) -> Optional[Any]:
UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 1_8, "width": 1_8} )
UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"height": 4_2, "width": 4_2} )
def _UpperCamelCase ( self : Any ) -> int:
pass
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , lowerCAmelCase__ )
self.assertIsInstance(encoding.boxes , lowerCAmelCase__ )
# Test batched
UpperCAmelCase = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self : List[str] ) -> Tuple:
# Initialize image_processing
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCAmelCase = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self : int ) -> int:
# Initialize image_processing
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCAmelCase = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self : Dict ) -> Tuple:
# with apply_OCR = True
UpperCAmelCase = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCAmelCase = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
UpperCAmelCase = Image.open(ds[0]["file"] ).convert("RGB" )
UpperCAmelCase = image_processing(lowerCAmelCase__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCAmelCase = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
UpperCAmelCase = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], 
[4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCAmelCase__ )
self.assertListEqual(encoding.boxes , lowerCAmelCase__ )
# with apply_OCR = False
UpperCAmelCase = LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__ )
UpperCAmelCase = image_processing(lowerCAmelCase__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
| 701 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _snake_case , unittest.TestCase ):
UpperCAmelCase = LEDTokenizer
UpperCAmelCase = LEDTokenizerFast
UpperCAmelCase = True
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
super().setUp()
UpperCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCAmelCase = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
UpperCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCAmelCase = {"unk_token": "<unk>"}
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def _UpperCamelCase ( self : Union[str, Any] , **lowerCAmelCase__ : Optional[int] ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _UpperCamelCase ( self : str , **lowerCAmelCase__ : str ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _UpperCamelCase ( self : List[str] , lowerCAmelCase__ : List[Any] ) -> List[Any]:
return "lower newer", "lower newer"
@cached_property
def _UpperCamelCase ( self : Dict ) -> str:
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def _UpperCamelCase ( self : int ) -> Tuple:
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def _UpperCamelCase ( self : Tuple ) -> List[str]:
UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCAmelCase = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , max_length=len(lowerCAmelCase__ ) , padding=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@require_torch
def _UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="pt" )
self.assertIn("input_ids" , lowerCAmelCase__ )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertNotIn("labels" , lowerCAmelCase__ )
self.assertNotIn("decoder_attention_mask" , lowerCAmelCase__ )
@require_torch
def _UpperCamelCase ( self : int ) -> int:
UpperCAmelCase = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(text_target=lowerCAmelCase__ , max_length=3_2 , padding="max_length" , return_tensors="pt" )
self.assertEqual(3_2 , targets["input_ids"].shape[1] )
@require_torch
def _UpperCamelCase ( self : Any ) -> int:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(
["I am a small frog" * 1_0_2_4, "I am a small frog"] , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def _UpperCamelCase ( self : Dict ) -> Tuple:
UpperCAmelCase = ["A long paragraph for summarization."]
UpperCAmelCase = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase = tokenizer(text_target=lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase = inputs["input_ids"]
UpperCAmelCase = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def _UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = ["Summary of the text.", "Another summary."]
UpperCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
UpperCAmelCase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ )
UpperCAmelCase = [[0] * len(lowerCAmelCase__ ) for x in encoded_output["input_ids"]]
UpperCAmelCase = tokenizer.pad(lowerCAmelCase__ )
self.assertSequenceEqual(outputs["global_attention_mask"] , lowerCAmelCase__ )
def _UpperCamelCase ( self : List[str] ) -> int:
pass
def _UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = "A, <mask> AllenNLP sentence."
UpperCAmelCase = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
UpperCAmelCase = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 1 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase__ = get_tests_dir("fixtures")
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase = mock.Mock()
UpperCAmelCase = 5_0_0
UpperCAmelCase = {}
UpperCAmelCase = HTTPError
UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=lowerCAmelCase__ ) as mock_head:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# This check we did call the fake head request
mock_head.assert_called()
def _UpperCamelCase ( self : List[Any] ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class __magic_name__ ( unittest.TestCase ):
@classmethod
def _UpperCamelCase ( cls : List[str] ) -> List[Any]:
UpperCAmelCase = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def _UpperCamelCase ( cls : Optional[int] ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def _UpperCamelCase ( self : Any ) -> Any:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase__ , repo_id="test-feature-extractor" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def _UpperCamelCase ( self : List[Any] ) -> Tuple:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase__ , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def _UpperCamelCase ( self : Dict ) -> List[str]:
CustomFeatureExtractor.register_for_auto_class()
UpperCAmelCase = CustomFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=lowerCAmelCase__ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 702 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase__ = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def _UpperCamelCase ( self : int ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : List[Any] ) -> Dict:
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=0.9 , lowerCAmelCase__ : Tuple=3 , lowerCAmelCase__ : Optional[int]=0.5 ) -> Any:
if NLTK_VERSION >= version.Version("3.6.5" ):
UpperCAmelCase = [
meteor_score.single_meteor_score(
word_tokenize(lowerCAmelCase__ ) , word_tokenize(lowerCAmelCase__ ) , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
else:
UpperCAmelCase = [
meteor_score.single_meteor_score(lowerCAmelCase__ , lowerCAmelCase__ , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
return {"meteor": np.mean(lowerCAmelCase__ )}
| 1 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase__ : int = logging.get_logger(__name__)
class __magic_name__ ( _snake_case ):
UpperCAmelCase = ["""pixel_values"""]
def __init__( self : List[str] , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : int = 0.9 , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : Union[int, float] = 1 / 2_5_5 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase__ : Dict , ) -> None:
super().__init__(**lowerCAmelCase__ )
UpperCAmelCase = size if size is not None else {"shortest_edge": 2_2_4}
UpperCAmelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
UpperCAmelCase = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
UpperCAmelCase = get_size_dict(lowerCAmelCase__ , param_name="crop_size" )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = crop_pct
UpperCAmelCase = resample
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : Optional[float] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Tuple , ) -> np.ndarray:
UpperCAmelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
if crop_pct is not None:
if "shortest_edge" in size:
UpperCAmelCase = int(size["shortest_edge"] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
UpperCAmelCase = int(size["height"] / crop_pct )
else:
UpperCAmelCase = (int(size["height"] / crop_pct ), int(size["width"] / crop_pct ))
else:
raise ValueError("Invalid size for resize: {}".format(lowerCAmelCase__ ) )
UpperCAmelCase = get_resize_output_image_size(lowerCAmelCase__ , size=lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
else:
if "shortest_edge" in size:
UpperCAmelCase = get_resize_output_image_size(lowerCAmelCase__ , size=size["shortest_edge"] , default_to_square=lowerCAmelCase__ )
elif "height" in size and "width" in size:
UpperCAmelCase = (size["height"], size["width"])
else:
raise ValueError("Invalid size for resize: {}".format(lowerCAmelCase__ ) )
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _UpperCamelCase ( self : List[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Dict , ) -> np.ndarray:
UpperCAmelCase = get_size_dict(lowerCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(lowerCAmelCase__ , size=(size["height"], size["width"]) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _UpperCamelCase ( self : List[str] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[int, float] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[int] , ) -> Any:
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _UpperCamelCase ( self : List[str] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[int] , ) -> np.ndarray:
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : int = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : float = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase__ : str , ) -> PIL.Image.Image:
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = crop_pct if crop_pct is not None else self.crop_pct
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase = image_std if image_std is not None else self.image_std
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase = get_size_dict(lowerCAmelCase__ , param_name="crop_size" )
UpperCAmelCase = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_pct is None:
raise ValueError("Crop_pct must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase = [to_numpy_array(lowerCAmelCase__ ) for image in images]
if do_resize:
UpperCAmelCase = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , crop_pct=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images]
if do_center_crop:
UpperCAmelCase = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images]
if do_rescale:
UpperCAmelCase = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images]
if do_normalize:
UpperCAmelCase = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images]
UpperCAmelCase = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images]
UpperCAmelCase = {"pixel_values": images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
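# Illustrative usage (a sketch; this class follows the standard transformers
# image-processor API — upstream it corresponds to PoolFormerImageProcessor,
# whose crop_pct/resize behaviour matches the methods above):
#
#   processor = PoolFormerImageProcessor(size={"shortest_edge": 224}, crop_pct=0.9)
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])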
| 703 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """lxmert"""
UpperCAmelCase = {}
def __init__( self : int , lowerCAmelCase__ : Any=3_0_5_2_2 , lowerCAmelCase__ : List[str]=7_6_8 , lowerCAmelCase__ : Union[str, Any]=1_2 , lowerCAmelCase__ : List[Any]=9_5_0_0 , lowerCAmelCase__ : Any=1_6_0_0 , lowerCAmelCase__ : Union[str, Any]=4_0_0 , lowerCAmelCase__ : Tuple=3_0_7_2 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : int=5_1_2 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : List[str]=0.02 , lowerCAmelCase__ : str=1e-1_2 , lowerCAmelCase__ : str=9 , lowerCAmelCase__ : int=5 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : List[Any]=2_0_4_8 , lowerCAmelCase__ : Any=4 , lowerCAmelCase__ : Dict=6.67 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Tuple=True , **lowerCAmelCase__ : List[Any] , ) -> Dict:
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = num_qa_labels
UpperCAmelCase = num_object_labels
UpperCAmelCase = num_attr_labels
UpperCAmelCase = l_layers
UpperCAmelCase = x_layers
UpperCAmelCase = r_layers
UpperCAmelCase = visual_feat_dim
UpperCAmelCase = visual_pos_dim
UpperCAmelCase = visual_loss_normalizer
UpperCAmelCase = task_matched
UpperCAmelCase = task_mask_lm
UpperCAmelCase = task_obj_predict
UpperCAmelCase = task_qa
UpperCAmelCase = visual_obj_loss
UpperCAmelCase = visual_attr_loss
UpperCAmelCase = visual_feat_loss
UpperCAmelCase = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**lowerCAmelCase__ )
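# Illustrative usage (a sketch assuming the standard transformers API; the
# config above mirrors transformers.LxmertConfig):
#
#   from transformers import LxmertConfig, LxmertModel
#   config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
#   model = LxmertModel(config)  # randomly initialised, 9/5/5 layer split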
| 1 | 0 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowerCAmelCase__ = Mapping[str, np.ndarray]
lowerCAmelCase__ = Mapping[str, Any] # Is a nested dict.
lowerCAmelCase__ = 0.0_1
@dataclasses.dataclass(frozen=_snake_case )
class __magic_name__ :
UpperCAmelCase = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
UpperCAmelCase = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
UpperCAmelCase = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
UpperCAmelCase = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
UpperCAmelCase = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
UpperCAmelCase = None
# Optional remark about the protein. Included as a comment in output PDB
# files
UpperCAmelCase = None
# Templates used to generate this protein (prediction-only)
UpperCAmelCase = None
# Chain corresponding to each parent
UpperCAmelCase = None
def _lowerCAmelCase( __A ):
UpperCAmelCase = r"(\[[A-Z]+\]\n)"
UpperCAmelCase = [tag.strip() for tag in re.split(__A , __A ) if len(__A ) > 0]
UpperCAmelCase = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
UpperCAmelCase = ["N", "CA", "C"]
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
for g in groups:
if "[PRIMARY]" == g[0]:
UpperCAmelCase = g[1][0].strip()
for i in range(len(__A ) ):
if seq[i] not in residue_constants.restypes:
UpperCAmelCase = "X" # FIXME: strings are immutable
UpperCAmelCase = np.array(
[residue_constants.restype_order.get(__A , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
UpperCAmelCase = []
for axis in range(3 ):
tertiary.append(list(map(__A , g[1][axis].split() ) ) )
UpperCAmelCase = np.array(__A )
UpperCAmelCase = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__A ):
UpperCAmelCase = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
UpperCAmelCase = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
UpperCAmelCase = np.zeros(
(
len(__A ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__A ):
UpperCAmelCase = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__A , atom_mask=__A , aatype=__A , residue_index=np.arange(len(__A ) ) , b_factors=__A , )
def _lowerCAmelCase( __A , __A = 0 ):
UpperCAmelCase = []
UpperCAmelCase = prot.remark
if remark is not None:
pdb_headers.append(F"REMARK {remark}" )
UpperCAmelCase = prot.parents
UpperCAmelCase = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
UpperCAmelCase = [p for i, p in zip(__A , __A ) if i == chain_id]
if parents is None or len(__A ) == 0:
UpperCAmelCase = ["N/A"]
pdb_headers.append(F"PARENT {' '.join(__A )}" )
return pdb_headers
def _lowerCAmelCase( __A , __A ):
UpperCAmelCase = []
UpperCAmelCase = pdb_str.split("\n" )
UpperCAmelCase = prot.remark
if remark is not None:
out_pdb_lines.append(F"REMARK {remark}" )
UpperCAmelCase = 42
if prot.parents is not None and len(prot.parents ) > 0:
UpperCAmelCase = []
if prot.parents_chain_index is not None:
UpperCAmelCase = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__A ) , [] )
parent_dict[str(__A )].append(__A )
UpperCAmelCase = max([int(__A ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
UpperCAmelCase = parent_dict.get(str(__A ) , ["N/A"] )
parents_per_chain.append(__A )
else:
parents_per_chain.append(list(prot.parents ) )
else:
UpperCAmelCase = [["N/A"]]
def make_parent_line(__A ) -> str:
return F"PARENT {' '.join(__A )}"
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
UpperCAmelCase = 0
for i, l in enumerate(__A ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__A )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__A ):
UpperCAmelCase = parents_per_chain[chain_counter]
else:
UpperCAmelCase = ["N/A"]
out_pdb_lines.append(make_parent_line(__A ) )
return "\n".join(__A )
def _lowerCAmelCase( __A ):
UpperCAmelCase = residue_constants.restypes + ["X"]
def res_atoa(__A ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
UpperCAmelCase = residue_constants.atom_types
UpperCAmelCase = []
UpperCAmelCase = prot.atom_mask
UpperCAmelCase = prot.aatype
UpperCAmelCase = prot.atom_positions
UpperCAmelCase = prot.residue_index.astype(np.intaa )
UpperCAmelCase = prot.b_factors
UpperCAmelCase = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
UpperCAmelCase = get_pdb_headers(__A )
if len(__A ) > 0:
pdb_lines.extend(__A )
UpperCAmelCase = aatype.shape[0]
UpperCAmelCase = 1
UpperCAmelCase = 0
UpperCAmelCase = string.ascii_uppercase
UpperCAmelCase = None
# Add all atom sites.
for i in range(__A ):
UpperCAmelCase = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__A , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
UpperCAmelCase = "ATOM"
UpperCAmelCase = atom_name if len(__A ) == 4 else F" {atom_name}"
UpperCAmelCase = ""
UpperCAmelCase = ""
UpperCAmelCase = 1.00
UpperCAmelCase = atom_name[0] # Protein supports only C, N, O, S, this works.
UpperCAmelCase = ""
UpperCAmelCase = "A"
if chain_index is not None:
UpperCAmelCase = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
UpperCAmelCase = (
F"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
F"{res_name_a:>3} {chain_tag:>1}"
F"{residue_index[i]:>4}{insertion_code:>1} "
F"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
F"{occupancy:>6.2f}{b_factor:>6.2f} "
F"{element:>2}{charge:>2}"
)
pdb_lines.append(__A )
atom_index += 1
UpperCAmelCase = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
UpperCAmelCase = True
UpperCAmelCase = chain_index[i + 1]
if should_terminate:
# Close the chain.
UpperCAmelCase = "TER"
UpperCAmelCase = (
F"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"
)
pdb_lines.append(__A )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__A , __A ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(__A )
def _lowerCAmelCase( __A ):
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def _lowerCAmelCase( __A , __A , __A = None , __A = None , __A = None , __A = None , __A = None , ):
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=__A , remark=__A , parents=__A , parents_chain_index=__A , )
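# Illustrative round trip through the helpers above (a sketch using the
# upstream names: from_proteinnet_string parses [PRIMARY]/[TERTIARY]/[MASK]
# blocks into a Protein, and to_pdb serialises a Protein back to PDB text):
#
#   prot = from_proteinnet_string(proteinnet_text)
#   pdb_text = to_pdb(prot)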
| 704 |
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _lowerCAmelCase( __A = 100 ):
UpperCAmelCase = 1
UpperCAmelCase = 2
for i in range(2 , max_n + 1 ):
UpperCAmelCase = pre_numerator
UpperCAmelCase = 2 * i // 3 if i % 3 == 0 else 1
UpperCAmelCase = cur_numerator
UpperCAmelCase = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
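# A self-contained sketch of the recurrence above (hypothetical names): the
# continued fraction of e is [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...], and each
# convergent numerator satisfies h_i = a_i * h_{i-1} + h_{i-2}.
def convergent_numerator_demo(max_n: int = 10) -> int:
    prev, cur = 1, 2  # h_{-1} and h_0 (the leading 2 of the expansion)
    for i in range(2, max_n + 1):
        a_i = 2 * i // 3 if i % 3 == 0 else 1  # every third partial denominator is 2k/3
        prev, cur = cur, a_i * cur + prev
    return cur  # e.g. 1457 for max_n = 10, whose digit sum is 17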
if __name__ == "__main__":
print(f"{solution() = }")
| 1 | 0 |
from math import pow, sqrt
def _lowerCAmelCase( *__A ):
UpperCAmelCase = len(__A ) > 0 and all(value > 0.0 for value in values )
return result
def _lowerCAmelCase( __A , __A ):
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__A , __A )
else ValueError("Input Error: Molar mass values must greater than 0." )
)
def _lowerCAmelCase( __A , __A , __A ):
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__A , __A , __A )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def _lowerCAmelCase( __A , __A , __A ):
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__A , __A , __A )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def _lowerCAmelCase( __A , __A , __A ):
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(__A , __A , __A )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def _lowerCAmelCase( __A , __A , __A ):
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(__A , __A , __A )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
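# Worked example of Graham's law itself (a minimal, self-contained sketch):
# rate_1 / rate_2 = sqrt(M_2 / M_1). Comparing H2 (~2.016 g/mol) with
# O2 (~31.998 g/mol):
hydrogen_vs_oxygen_ratio = round(sqrt(31.998 / 2.016), 6)  # ~3.98: H2 effuses ~4x faster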
| 705 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 1 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
lowerCAmelCase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
lowerCAmelCase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """whisper"""
UpperCAmelCase = ["""past_key_values"""]
UpperCAmelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : List[str] , lowerCAmelCase__ : List[str]=5_1_8_6_5 , lowerCAmelCase__ : int=8_0 , lowerCAmelCase__ : Optional[int]=6 , lowerCAmelCase__ : Dict=4 , lowerCAmelCase__ : Tuple=6 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : int=1_5_3_6 , lowerCAmelCase__ : Any=1_5_3_6 , lowerCAmelCase__ : Optional[Any]=0.0 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : Optional[Any]=5_0_2_5_7 , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : List[str]=2_5_6 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : Dict=0.0 , lowerCAmelCase__ : List[str]=0.02 , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Dict=1_5_0_0 , lowerCAmelCase__ : List[Any]=4_4_8 , lowerCAmelCase__ : str=5_0_2_5_6 , lowerCAmelCase__ : Union[str, Any]=5_0_2_5_6 , lowerCAmelCase__ : Any=5_0_2_5_6 , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Tuple=[2_2_0, 5_0_2_5_6] , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : List[Any]=2_5_6 , lowerCAmelCase__ : Optional[int]=False , lowerCAmelCase__ : int=0.05 , lowerCAmelCase__ : Any=1_0 , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : List[str]=0.0 , lowerCAmelCase__ : Tuple=1_0 , lowerCAmelCase__ : Optional[Any]=0 , lowerCAmelCase__ : Optional[int]=7 , **lowerCAmelCase__ : Tuple , ) -> Optional[Any]:
UpperCAmelCase = vocab_size
UpperCAmelCase = num_mel_bins
UpperCAmelCase = d_model
UpperCAmelCase = encoder_layers
UpperCAmelCase = encoder_attention_heads
UpperCAmelCase = decoder_layers
UpperCAmelCase = decoder_attention_heads
UpperCAmelCase = decoder_ffn_dim
UpperCAmelCase = encoder_ffn_dim
UpperCAmelCase = dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = activation_function
UpperCAmelCase = init_std
UpperCAmelCase = encoder_layerdrop
UpperCAmelCase = decoder_layerdrop
UpperCAmelCase = use_cache
UpperCAmelCase = encoder_layers
UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase = max_source_positions
UpperCAmelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase = classifier_proj_size
UpperCAmelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase = apply_spec_augment
UpperCAmelCase = mask_time_prob
UpperCAmelCase = mask_time_length
UpperCAmelCase = mask_time_min_masks
UpperCAmelCase = mask_feature_prob
UpperCAmelCase = mask_feature_length
UpperCAmelCase = mask_feature_min_masks
UpperCAmelCase = median_filter_width
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , suppress_tokens=lowerCAmelCase__ , begin_suppress_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , )
class __magic_name__ ( _snake_case ):
@property
def _UpperCamelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase = OrderedDict(
[
("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
] )
if self.use_past:
UpperCAmelCase = {0: "batch"}
else:
UpperCAmelCase = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase__ , direction="inputs" )
return common_inputs
def _UpperCamelCase ( self : List[str] , lowerCAmelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowerCAmelCase__ : int = -1 , lowerCAmelCase__ : int = -1 , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional["TensorType"] = None , lowerCAmelCase__ : int = 2_2_0_5_0 , lowerCAmelCase__ : float = 5.0 , lowerCAmelCase__ : int = 2_2_0 , ) -> Mapping[str, Any]:
UpperCAmelCase = OrderedDict()
UpperCAmelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCAmelCase__ , framework=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , time_duration=lowerCAmelCase__ , frequency=lowerCAmelCase__ , )
UpperCAmelCase = encoder_inputs["input_features"].shape[2]
UpperCAmelCase = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCAmelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = encoder_inputs.pop("input_features" )
UpperCAmelCase = decoder_inputs.pop("decoder_input_ids" )
if "past_key_values" in decoder_inputs:
UpperCAmelCase = decoder_inputs.pop("past_key_values" )
return dummy_inputs
@property
def _UpperCamelCase ( self : List[str] ) -> float:
return 1e-3
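# Illustrative usage (a sketch assuming the standard transformers API; the
# config above mirrors transformers.WhisperConfig):
#
#   from transformers import WhisperConfig, WhisperForConditionalGeneration
#   config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
#   model = WhisperForConditionalGeneration(config)  # randomly initialised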
| 706 |
import numpy
# List of input, output pairs
lowerCAmelCase__ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
lowerCAmelCase__ = (((515, 22, 13), 555), ((61, 35, 49), 150))
lowerCAmelCase__ = [2, 4, 1, 5]
lowerCAmelCase__ = len(train_data)
lowerCAmelCase__ = 0.0_0_9
def _lowerCAmelCase( __A , __A="train" ):
return calculate_hypothesis_value(__A , __A ) - output(
__A , __A )
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
for i in range(len(__A ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _lowerCAmelCase( __A , __A ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _lowerCAmelCase( __A , __A ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _lowerCAmelCase( __A , __A=m ):
UpperCAmelCase = 0
for i in range(__A ):
if index == -1:
summation_value += _error(__A )
else:
summation_value += _error(__A ) * train_data[i][0][index]
return summation_value
def _lowerCAmelCase( __A ):
UpperCAmelCase = summation_of_cost_derivative(__A , __A ) / m
return cost_derivative_value
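# A compact vectorised sketch of the same batch update (hypothetical names,
# illustrative only): theta <- theta - lr * X^T (X @ theta - y) / m,
# where x is an (m, d) ndarray of inputs and y an (m,) ndarray of targets.
def gradient_descent_demo(x, y, lr=0.009, iters=10_000):
    x = numpy.hstack([numpy.ones((x.shape[0], 1)), x])  # prepend a bias column
    theta = numpy.zeros(x.shape[1])
    for _ in range(iters):
        grad = x.T @ (x @ theta - y) / len(y)  # gradient of the mean squared error
        theta -= lr * grad
    return theta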
def _lowerCAmelCase( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
UpperCAmelCase = 0.000002
UpperCAmelCase = 0
UpperCAmelCase = 0
while True:
j += 1
UpperCAmelCase = [0, 0, 0, 0]
for i in range(0 , len(__A ) ):
UpperCAmelCase = get_cost_derivative(i - 1 )
UpperCAmelCase = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
__A , __A , atol=__A , rtol=__A , ):
break
UpperCAmelCase = temp_parameter_vector
print(("Number of iterations:", j) )
def _lowerCAmelCase( ):
    for i in range(len(test_data ) ):
        print(f"Actual output value: {output(i , 'test' )}" )
        print(f"Hypothesis output: {calculate_hypothesis_value(i , 'test' )}" )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 1 | 0 |
def _lowerCAmelCase( __A ):
UpperCAmelCase = []
UpperCAmelCase = set({"(", "[", "{"} )
UpperCAmelCase = set({")", "]", "}"} )
UpperCAmelCase = {"{": "}", "[": "]", "(": ")"}
for i in range(len(__A ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(__A ) == 0 or (len(__A ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(__A ) == 0
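# A self-contained sketch of the same stack-based check (hypothetical name),
# with the expected behaviour shown inline:
def is_balanced_demo(s: str) -> bool:
    pairs = {")": "(", "]": "[", "}": "{"}
    stack = []
    for ch in s:
        if ch in "([{":
            stack.append(ch)
        elif ch in pairs and (not stack or stack.pop() != pairs[ch]):
            return False
    return not stack
assert is_balanced_demo("([]{})")      # properly nested
assert not is_balanced_demo("([)]")    # interleaved pairs are rejected
assert not is_balanced_demo("((")      # unmatched openers remain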
def _lowerCAmelCase( ):
UpperCAmelCase = input("Enter sequence of brackets: " )
if is_balanced(__A ):
print(__A , "is balanced" )
else:
print(__A , "is not balanced" )
if __name__ == "__main__":
main()
| 707 |
def _lowerCAmelCase( __A , __A , __A ):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(__A , n - 1 , __A ) * a) % mod
else:
        UpperCAmelCase = binary_exponentiation(__A , n // 2 , __A )
return (b * b) % mod
# a prime number
lowerCAmelCase__ = 701
lowerCAmelCase__ = 1000000000
lowerCAmelCase__ = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
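# Sanity check of the Fermat inverse used above, via Python's built-in
# three-argument pow (a minimal sketch): b^(p-2) ≡ b^(-1) (mod p) for prime p.
print((3 * pow(3, 7 - 2, 7)) % 7 == 1)  # True: pow(3, 5, 7) == 5 and 3 * 5 ≡ 1 (mod 7)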
| 1 | 0 |